/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

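/* Test bit 'nr' in an array of __u32 words: 'nr >> 5' selects the word,
 * 'nr & 31' the bit inside it. Used below to consult the event and OCF
 * bitmaps of the per-socket filter and the security filter.
 */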
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

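/* Every frame delivered to HCI_CHANNEL_MONITOR sockets is prefixed with a
 * struct hci_mon_hdr carrying a little-endian opcode (encoding packet type
 * and direction), the controller index and the payload length.
 */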
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

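/* Build an HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX record for a controller.
 * Used for live register/unregister events and to replay the current device
 * list to a freshly bound monitor socket (see send_monitor_replay below).
 */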
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

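/* Propagate controller events (register, unregister, up, down) to the
 * monitor channel and, wrapped as HCI_EV_STACK_INTERNAL events, to raw
 * sockets. On HCI_DEV_UNREG any socket still bound to the device is
 * detached from it.
 */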
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

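/*
 * bind() selects the channel a socket speaks on. A minimal userspace
 * sketch, assuming the usual AF_BLUETOOTH headers (error handling omitted):
 *
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		 // hci0, or HCI_DEV_NONE
 *		.hci_channel = HCI_CHANNEL_RAW,  // or _CONTROL / _MONITOR
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *) &a, sizeof(a));
 *
 * The control and monitor channels require hci_dev == HCI_DEV_NONE and
 * CAP_NET_ADMIN / CAP_NET_RAW respectively.
 */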
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = 0;

	release_sock(sk);
	return 0;
}

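/* Attach ancillary data to a received frame: direction (HCI_CMSG_DIR) and
 * receive timestamp (HCI_CMSG_TSTAMP), each only if enabled on the socket
 * via the HCI_DATA_DIR / HCI_TIME_STAMP options below.
 */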
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

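/* Frames written to a raw socket start with a one-byte packet type
 * indicator (HCI_COMMAND_PKT, HCI_ACLDATA_PKT, ...) followed by the HCI
 * packet itself. Commands from senders without CAP_NET_RAW are checked
 * against hci_sec_filter; vendor commands (OGF 0x3f) and raw-mode devices
 * go to the raw queue instead of the command queue.
 */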
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

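/* Per-socket delivery options on SOL_HCI (raw channel only): HCI_DATA_DIR
 * and HCI_TIME_STAMP toggle the ancillary data above, while HCI_FILTER
 * installs a struct hci_ufilter selecting packet types, event codes and an
 * optional command opcode. Without CAP_NET_RAW the requested filter is
 * clamped to hci_sec_filter.
 */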
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	proto_unregister(&hci_sk_proto);
}