blob: 59e68f1991782983f6c2dc3ad177e3f01031ba27 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Marcel Holtmanncd82e612012-02-20 20:34:38 +010034static atomic_t monitor_promisc = ATOMIC_INIT(0);
35
Linus Torvalds1da177e2005-04-16 15:20:36 -070036/* ----- HCI socket interface ----- */
37
38static inline int hci_test_bit(int nr, void *addr)
39{
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
41}
42
/* Security filter: the packet types, events and commands that sockets
 * without CAP_NET_RAW are allowed to receive/send.  The event and command
 * masks are bitmaps of 32-bit words indexed by event code / OCF number
 * (see hci_test_bit() and the checks in hci_sock_sendmsg/setsockopt).
 */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
64
/* Global list of open HCI sockets, protected by its embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
68
Marcel Holtmannf81fe642013-08-25 23:25:15 -070069static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
70{
71 struct hci_filter *flt;
72 int flt_type, flt_event;
73
74 /* Apply filter */
75 flt = &hci_pi(sk)->filter;
76
77 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
78 flt_type = 0;
79 else
80 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
81
82 if (!test_bit(flt_type, &flt->type_mask))
83 return true;
84
85 /* Extra filter for event packets only */
86 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
87 return false;
88
89 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
90
91 if (!hci_test_bit(flt_event, &flt->event_mask))
92 return true;
93
94 /* Check filter only when opcode is set */
95 if (!flt->opcode)
96 return false;
97
98 if (flt_event == HCI_EV_CMD_COMPLETE &&
99 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
100 return true;
101
102 if (flt_event == HCI_EV_CMD_STATUS &&
103 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
104 return true;
105
106 return false;
107}
108
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this device are eligible */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		if (is_filtered_packet(sk, skb))
			continue;

		if (!skb_copy) {
			/* Create a private copy with headroom.  Done lazily
			 * so nothing is allocated when no socket wants the
			 * frame; the copy is then shared by all receivers.
			 */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		/* Each receiver gets its own clone of the shared copy */
		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
157
/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Clone per receiver; drop the clone on queueing failure */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
190
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	/* Fast exit when nobody is monitoring */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the
	 * corresponding monitor channel opcode.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not forwarded to monitors */
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom, built lazily
			 * so nothing is allocated without a listener.
			 */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
265
266static void send_monitor_event(struct sk_buff *skb)
267{
268 struct sock *sk;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100269
270 BT_DBG("len %d", skb->len);
271
272 read_lock(&hci_sk_list.lock);
273
Sasha Levinb67bfe02013-02-27 17:06:00 -0800274 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100275 struct sk_buff *nskb;
276
277 if (sk->sk_state != BT_BOUND)
278 continue;
279
280 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
281 continue;
282
283 nskb = skb_clone(skb, GFP_ATOMIC);
284 if (!nskb)
285 continue;
286
287 if (sock_queue_rcv_skb(sk, nskb))
288 kfree_skb(nskb);
289 }
290
291 read_unlock(&hci_sk_list.lock);
292}
293
/* Build a monitor-channel skb describing a device lifecycle @event.
 * Returns NULL for unsupported events or on allocation failure;
 * the caller owns (and must free) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* New-index events carry the device's identity */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): copies exactly 8 bytes of hdev->name —
		 * assumes ni->name is 8 bytes wide; confirm against
		 * struct hci_mon_new_index.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Del-index events carry no payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the common monitor header */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
337
338static void send_monitor_replay(struct sock *sk)
339{
340 struct hci_dev *hdev;
341
342 read_lock(&hci_dev_list_lock);
343
344 list_for_each_entry(hdev, &hci_dev_list, list) {
345 struct sk_buff *skb;
346
347 skb = create_monitor_event(hdev, HCI_DEV_REG);
348 if (!skb)
349 continue;
350
351 if (sock_queue_rcv_skb(sk, skb))
352 kfree_skb(skb);
353 }
354
355 read_unlock(&hci_dev_list_lock);
356}
357
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Standard HCI event header wrapping the stack-internal event */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the event as if it arrived from the controller */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
385
/* Broadcast a device lifecycle event to monitor and raw sockets, and
 * detach sockets from a device that is being unregistered.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				/* Wake the socket with EPIPE and drop its
				 * reference on the departing device.
				 */
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
428
/* Release an HCI socket: drop promiscuous/device references, unlink it
 * from the global socket list and purge any queued frames.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Closing a monitor socket drops one promiscuous reference */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	/* Discard anything still queued in either direction */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
459
Antti Julkub2a66aa2011-06-15 12:01:14 +0300460static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200461{
462 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300463 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200464
465 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
466 return -EFAULT;
467
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300468 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300469
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200470 err = hci_blacklist_add(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300471
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300472 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300473
474 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200475}
476
Antti Julkub2a66aa2011-06-15 12:01:14 +0300477static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200478{
479 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300480 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200481
482 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
483 return -EFAULT;
484
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300485 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300486
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200487 err = hci_blacklist_del(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300488
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300489 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300490
491 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200492}
493
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* Devices claimed for exclusive user-channel access are off-limits */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		/* Toggle raw mode on the device */
		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	/* Fall back to the driver's own ioctl handler, if any */
	if (hdev->ioctl)
		return hdev->ioctl(hdev, cmd, arg);

	return -EINVAL;
}
543
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only raw channel sockets support these ioctls */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The stack-level ioctls below run without the socket lock; their
	 * per-case returns deliberately bypass the locked "done" path.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining ioctls need a bound socket; retake the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
616
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy only as much as the caller supplied; the rest stays zeroed */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE leaves the socket unbound to any device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		/* Control channel is not per-device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		/* Monitor channel is not per-device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Replay existing device indexes to the new listener */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
706
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	/* HCI sockets are connectionless: there is no peer name */
	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Only sockets bound to a device have a reportable name */
	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel= hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
737
/* Attach requested ancillary data (direction, timestamp) to @msg,
 * according to the socket's cmsg mask.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks expect the compat timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900774
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	/* Truncate to the caller's buffer; flag if data was lost */
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Ancillary data depends on the channel type */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
820
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Frame must at least hold the type byte plus a minimal header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte of the frame is the packet type indicator */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue whitelisted commands */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor (0x3f) and raw-mode commands bypass the cmd queue */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data frames require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
925
/* Set a socket option on an HCI socket.
 *
 * Only sockets bound to HCI_CHANNEL_RAW accept options here; every other
 * channel gets -EBADFD.  Supported options:
 *   HCI_DATA_DIR   - toggle reporting of packet direction via cmsg
 *   HCI_TIME_STAMP - toggle reporting of packet timestamps via cmsg
 *   HCI_FILTER     - install a per-socket packet type/event/opcode filter
 *
 * Returns 0 on success or a negative errno (-EBADFD, -EFAULT,
 * -ENOPROTOOPT).
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	/* Option handling below touches raw-channel state only. */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			/* Pre-load uf with the currently installed filter so
			 * that a short copy_from_user() below only replaces
			 * the leading fields and leaves the rest unchanged.
			 */
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets may not widen the filter beyond the
		 * security filter's whitelist of types and events.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			/* Commit the (possibly restricted) filter. */
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1008
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001009static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1010 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011{
1012 struct hci_ufilter uf;
1013 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001014 int len, opt, err = 0;
1015
1016 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017
1018 if (get_user(len, optlen))
1019 return -EFAULT;
1020
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001021 lock_sock(sk);
1022
1023 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001024 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001025 goto done;
1026 }
1027
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028 switch (optname) {
1029 case HCI_DATA_DIR:
1030 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1031 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001032 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033 opt = 0;
1034
1035 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001036 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 break;
1038
1039 case HCI_TIME_STAMP:
1040 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1041 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001042 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 opt = 0;
1044
1045 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001046 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 break;
1048
1049 case HCI_FILTER:
1050 {
1051 struct hci_filter *f = &hci_pi(sk)->filter;
1052
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001053 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 uf.type_mask = f->type_mask;
1055 uf.opcode = f->opcode;
1056 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1057 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1058 }
1059
1060 len = min_t(unsigned int, len, sizeof(uf));
1061 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001062 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063 break;
1064
1065 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001066 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 break;
1068 }
1069
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001070done:
1071 release_sock(sk);
1072 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073}
1074
/* proto_ops vtable for HCI sockets.  Connection-oriented operations
 * (listen, connect, accept, ...) are stubbed out with the sock_no_*
 * helpers since HCI sockets are raw, connectionless endpoints.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1094
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo (accessed through hci_pi()).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1100
Eric Paris3f378b62009-11-05 22:18:14 -08001101static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1102 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103{
1104 struct sock *sk;
1105
1106 BT_DBG("sock %p", sock);
1107
1108 if (sock->type != SOCK_RAW)
1109 return -ESOCKTNOSUPPORT;
1110
1111 sock->ops = &hci_sock_ops;
1112
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001113 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 if (!sk)
1115 return -ENOMEM;
1116
1117 sock_init_data(sock, sk);
1118
1119 sock_reset_flag(sk, SOCK_ZAPPED);
1120
1121 sk->sk_protocol = protocol;
1122
1123 sock->state = SS_UNCONNECTED;
1124 sk->sk_state = BT_OPEN;
1125
1126 bt_sock_link(&hci_sk_list, sk);
1127 return 0;
1128}
1129
/* Registered with the Bluetooth core so that socket(AF_BLUETOOTH,
 * SOCK_RAW, BTPROTO_HCI) is routed to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1135
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136int __init hci_sock_init(void)
1137{
1138 int err;
1139
1140 err = proto_register(&hci_sk_proto, 0);
1141 if (err < 0)
1142 return err;
1143
1144 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001145 if (err < 0) {
1146 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001148 }
1149
Al Virob0316612013-04-04 19:14:33 -04001150 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001151 if (err < 0) {
1152 BT_ERR("Failed to create HCI proc file");
1153 bt_sock_unregister(BTPROTO_HCI);
1154 goto error;
1155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157 BT_INFO("HCI socket layer initialized");
1158
1159 return 0;
1160
1161error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 proto_unregister(&hci_sk_proto);
1163 return err;
1164}
1165
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): proc entry, protocol handler, then the proto itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}