blob: cb43a9caf45c68db01697740238755c21f024242 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/kernel.h>
18
19#include <linux/skbuff.h>
20#include <linux/list.h>
21#include <linux/workqueue.h>
22#include <linux/timer.h>
23
24#include <linux/crypto.h>
25#include <linux/scatterlist.h>
26#include <linux/err.h>
27#include <crypto/hash.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/amp.h>
33
/* Work queue used by the AMP code (set up outside this chunk). */
static struct workqueue_struct *amp_workqueue;

/* All active AMP managers (one per L2CAP connection) and its guard. */
LIST_HEAD(amp_mgr_list);
DEFINE_RWLOCK(amp_mgr_list_lock);

/* Forward declarations for helpers defined later in this file. */
static int send_a2mp(struct socket *sock, u8 *data, int len);

static void ctx_timeout(unsigned long data);

static void launch_ctx(struct amp_mgr *mgr);
static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
static int kill_ctx(struct amp_ctx *ctx);
static int cancel_ctx(struct amp_ctx *ctx);

static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
49
/* Tear down an AMP manager: unlink it from the global list, kill every
 * context it still owns, then free it along with its cached controller.
 *
 * The context list is drained by repeatedly taking the first entry.
 * The read lock is dropped around kill_ctx() because killing a context
 * ends in destroy_ctx(), which takes ctx_list_lock for writing to
 * unlink the entry — holding the read lock across the call would
 * deadlock.
 */
static void remove_amp_mgr(struct amp_mgr *mgr)
{
	BT_DBG("mgr %p", mgr);

	write_lock_bh(&amp_mgr_list_lock);
	list_del(&mgr->list);
	write_unlock_bh(&amp_mgr_list_lock);

	read_lock_bh(&mgr->ctx_list_lock);
	while (!list_empty(&mgr->ctx_list)) {
		struct amp_ctx *ctx;
		ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
		/* drop the read lock so kill_ctx() can unlink ctx */
		read_unlock_bh(&mgr->ctx_list_lock);
		BT_DBG("kill ctx %p", ctx);
		kill_ctx(ctx);
		read_lock_bh(&mgr->ctx_list_lock);
	}
	read_unlock_bh(&mgr->ctx_list_lock);

	kfree(mgr->ctrls);

	kfree(mgr);
}
73
74static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
75{
76 struct amp_mgr *mgr;
77 struct amp_mgr *found = NULL;
78
79 read_lock_bh(&amp_mgr_list_lock);
80 list_for_each_entry(mgr, &amp_mgr_list, list) {
81 if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
82 found = mgr;
83 break;
84 }
85 }
86 read_unlock_bh(&amp_mgr_list_lock);
87 return found;
88}
89
90static struct amp_mgr *get_create_amp_mgr(struct l2cap_conn *conn,
91 struct sk_buff *skb)
92{
93 struct amp_mgr *mgr;
94
95 write_lock_bh(&amp_mgr_list_lock);
96 list_for_each_entry(mgr, &amp_mgr_list, list) {
97 if (mgr->l2cap_conn == conn) {
98 BT_DBG("conn %p found %p", conn, mgr);
99 goto gc_finished;
100 }
101 }
102
103 mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
104 if (!mgr)
105 goto gc_finished;
106
107 mgr->l2cap_conn = conn;
108 mgr->next_ident = 1;
109 INIT_LIST_HEAD(&mgr->ctx_list);
110 rwlock_init(&mgr->ctx_list_lock);
111 mgr->skb = skb;
112 BT_DBG("conn %p mgr %p", conn, mgr);
113 mgr->a2mp_sock = open_fixed_channel(conn->src, conn->dst);
114 if (!mgr->a2mp_sock) {
115 kfree(mgr);
116 goto gc_finished;
117 }
118 list_add(&(mgr->list), &amp_mgr_list);
119
120gc_finished:
121 write_unlock_bh(&amp_mgr_list_lock);
122 return mgr;
123}
124
125static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
126{
127 if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
128 return mgr->ctrls;
129 else
130 return NULL;
131}
132
133static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
134{
135 struct amp_ctrl *ctrl;
136
137 BT_DBG("mgr %p, id %d", mgr, id);
138 if ((mgr->ctrls) && (mgr->ctrls->id == id))
139 ctrl = mgr->ctrls;
140 else {
141 kfree(mgr->ctrls);
142 ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
143 if (ctrl) {
144 ctrl->mgr = mgr;
145 ctrl->id = id;
146 }
147 mgr->ctrls = ctrl;
148 }
149
150 return ctrl;
151}
152
153static struct amp_ctx *create_ctx(u8 type, u8 state)
154{
155 struct amp_ctx *ctx = NULL;
156
157 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
158 if (ctx) {
159 ctx->type = type;
160 ctx->state = state;
161 init_timer(&(ctx->timer));
162 ctx->timer.function = ctx_timeout;
163 ctx->timer.data = (unsigned long) ctx;
164 }
165 BT_DBG("ctx %p, type %d", ctx, type);
166 return ctx;
167}
168
169static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
170{
171 BT_DBG("ctx %p", ctx);
172 write_lock_bh(&mgr->ctx_list_lock);
173 list_add(&ctx->list, &mgr->ctx_list);
174 write_unlock_bh(&mgr->ctx_list_lock);
175 ctx->mgr = mgr;
176 execute_ctx(ctx, AMP_INIT, 0);
177}
178
/* Unlink and free a context: stop its timer, remove it from the owning
 * manager's list and, if another context was parked behind this one
 * (collision / serialization via ->deferred), kick that context off
 * before freeing.
 */
static void destroy_ctx(struct amp_ctx *ctx)
{
	struct amp_mgr *mgr = ctx->mgr;

	BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
	del_timer(&ctx->timer);
	write_lock_bh(&mgr->ctx_list_lock);
	list_del(&ctx->list);
	write_unlock_bh(&mgr->ctx_list_lock);
	/* run the context that was waiting on this one, if any */
	if (ctx->deferred)
		execute_ctx(ctx->deferred, AMP_INIT, 0);
	kfree(ctx);
}
192
193static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
194{
195 struct amp_ctx *fnd = NULL;
196 struct amp_ctx *ctx;
197
198 read_lock_bh(&mgr->ctx_list_lock);
199 list_for_each_entry(ctx, &mgr->ctx_list, list) {
200 if (ctx->type == type) {
201 fnd = ctx;
202 break;
203 }
204 }
205 read_unlock_bh(&mgr->ctx_list_lock);
206 return fnd;
207}
208
209static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
210{
211 struct amp_mgr *mgr = cur->mgr;
212 struct amp_ctx *fnd = NULL;
213 struct amp_ctx *ctx;
214
215 read_lock_bh(&mgr->ctx_list_lock);
216 list_for_each_entry(ctx, &mgr->ctx_list, list) {
217 if ((ctx->type == type) && (ctx != cur)) {
218 fnd = ctx;
219 break;
220 }
221 }
222 read_unlock_bh(&mgr->ctx_list_lock);
223 return fnd;
224}
225
226static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
227{
228 struct amp_ctx *fnd = NULL;
229 struct amp_ctx *ctx;
230
231 read_lock_bh(&mgr->ctx_list_lock);
232 list_for_each_entry(ctx, &mgr->ctx_list, list) {
233 if ((ctx->evt_type & AMP_A2MP_RSP) &&
234 (ctx->rsp_ident == ident)) {
235 fnd = ctx;
236 break;
237 }
238 }
239 read_unlock_bh(&mgr->ctx_list_lock);
240 return fnd;
241}
242
243static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
244 u16 evt_value)
245{
246 struct amp_mgr *mgr;
247 struct amp_ctx *fnd = NULL;
248
249 read_lock_bh(&amp_mgr_list_lock);
250 list_for_each_entry(mgr, &amp_mgr_list, list) {
251 struct amp_ctx *ctx;
252 read_lock_bh(&mgr->ctx_list_lock);
253 list_for_each_entry(ctx, &mgr->ctx_list, list) {
254 struct hci_dev *ctx_hdev;
255 ctx_hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
256 if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
257 switch (evt_type) {
258 case AMP_HCI_CMD_STATUS:
259 case AMP_HCI_CMD_CMPLT:
260 if (ctx->opcode == evt_value)
261 fnd = ctx;
262 break;
263 case AMP_HCI_EVENT:
264 if (ctx->evt_code == (u8) evt_value)
265 fnd = ctx;
266 break;
267 }
268 }
269 if (ctx_hdev)
270 hci_dev_put(ctx_hdev);
271
272 if (fnd)
273 break;
274 }
275 read_unlock_bh(&mgr->ctx_list_lock);
276 }
277 read_unlock_bh(&amp_mgr_list_lock);
278 return fnd;
279}
280
281static inline u8 next_ident(struct amp_mgr *mgr)
282{
283 if (++mgr->next_ident == 0)
284 mgr->next_ident = 1;
285 return mgr->next_ident;
286}
287
288static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
289 u16 len, void *data, u16 len2, void *data2)
290{
291 struct a2mp_cmd_hdr *hdr;
292 int plen;
293 u8 *p, *cmd;
294
295 BT_DBG("ident %d code 0x%02x", ident, code);
296 if (!mgr->a2mp_sock)
297 return;
298 plen = sizeof(*hdr) + len + len2;
299 cmd = kzalloc(plen, GFP_ATOMIC);
300 if (!cmd)
301 return;
302 hdr = (struct a2mp_cmd_hdr *) cmd;
303 hdr->code = code;
304 hdr->ident = ident;
305 hdr->len = cpu_to_le16(len+len2);
306 p = cmd + sizeof(*hdr);
307 memcpy(p, data, len);
308 p += len;
309 memcpy(p, data2, len2);
310 send_a2mp(mgr->a2mp_sock, cmd, plen);
311 kfree(cmd);
312}
313
/* Single-fragment convenience wrapper around send_a2mp_cmd2(). */
static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
					u8 code, u16 len, void *data)
{
	send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
}
319
/* Handle an incoming A2MP Command Reject: log the reason and kill the
 * local context (if any) that was waiting on the rejected ident.
 * Returns -EINVAL when the packet is shorter than a reject payload.
 */
static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cmd_rej *rej;
	struct amp_ctx *ctx;

	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
	rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*rej))
		return -EINVAL;
	BT_DBG("reason %d", le16_to_cpu(rej->reason));
	ctx = get_ctx_a2mp(mgr, hdr->ident);
	if (ctx)
		kill_ctx(ctx);
	skb_pull(skb, sizeof(*rej));
	return 0;
}
337
338static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
339 void *msg)
340{
341 struct a2mp_cl clist[16];
342 struct a2mp_cl *cl;
343 struct hci_dev *hdev;
344 int num_ctrls = 1, id;
345
346 cl = clist;
347 cl->id = 0;
348 cl->type = 0;
349 cl->status = 1;
350
351 for (id = 0; id < 16; ++id) {
352 hdev = hci_dev_get(id);
353 if (hdev) {
354 if ((hdev->amp_type != HCI_BREDR) &&
355 test_bit(HCI_UP, &hdev->flags)) {
356 (cl + num_ctrls)->id = HCI_A2MP_ID(hdev->id);
357 (cl + num_ctrls)->type = hdev->amp_type;
358 (cl + num_ctrls)->status = hdev->amp_status;
359 ++num_ctrls;
360 }
361 hci_dev_put(hdev);
362 }
363 }
364 send_a2mp_cmd2(mgr, ident, code, len, msg,
365 num_ctrls*sizeof(*cl), clist);
366
367 return 0;
368}
369
370static void send_a2mp_change_notify(void)
371{
372 struct amp_mgr *mgr;
373
374 read_lock_bh(&amp_mgr_list_lock);
375 list_for_each_entry(mgr, &amp_mgr_list, list) {
376 if (mgr->discovered)
377 send_a2mp_cl(mgr, next_ident(mgr),
378 A2MP_CHANGE_NOTIFY, 0, NULL);
379 }
380 read_unlock_bh(&amp_mgr_list_lock);
381}
382
/* Handle an A2MP Discover Request: skip any extended feature mask words
 * (bit 15 set means another 16-bit mask follows), mark the manager as
 * discovered, then reply with our MTU and controller list.
 * Returns -EINVAL on a truncated packet.
 */
static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_discover_req *req;
	u16 *efm;
	struct a2mp_discover_rsp rsp;

	req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*req))
		return -EINVAL;
	efm = (u16 *) skb_pull(skb, sizeof(*req));

	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
		le16_to_cpu(req->ext_feat));

	/* bit 15 of the feature mask flags a continuation word */
	while (le16_to_cpu(req->ext_feat) & 0x8000) {
		if (skb->len < sizeof(*efm))
			return -EINVAL;
		req->ext_feat = *efm;
		BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
		efm = (u16 *) skb_pull(skb, sizeof(*efm));
	}

	rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp.ext_feat = 0;

	/* remember the peer has discovered us: enables change notifies */
	mgr->discovered = 1;

	return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
				sizeof(rsp), &rsp);
}
414
/* Handle an A2MP Change Notify: refresh our cached view of each listed
 * remote controller, then acknowledge with a Change Response.
 */
static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cl *cl;

	cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
	while (skb->len >= sizeof(*cl)) {
		struct amp_ctrl *ctrl;
		/* id 0 is the BR/EDR controller — nothing to cache */
		if (cl->id != 0) {
			ctrl = get_create_ctrl(mgr, cl->id);
			if (ctrl != NULL) {
				ctrl->type = cl->type;
				ctrl->status = cl->status;
			}
		}
		cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
	}

	/* TODO find controllers in manager that were not on received */
	/* controller list and destroy them */
	send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);

	return 0;
}
439
/* Handle an A2MP Get Info Request: report bandwidth / latency /
 * capability data for the requested local controller, or status 1 when
 * the id does not name a usable AMP controller.
 */
static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	u8 *data;
	int id;
	struct hci_dev *hdev;
	struct a2mp_getinfo_rsp rsp;

	data = (u8 *) skb_pull(skb, sizeof(*hdr));
	if (le16_to_cpu(hdr->len) < sizeof(*data))
		return -EINVAL;
	if (skb->len < sizeof(*data))
		return -EINVAL;
	id = *data;
	skb_pull(skb, sizeof(*data));
	rsp.id = id;
	rsp.status = 1;	/* failure until the controller checks out */

	BT_DBG("id %d", id);
	hdev = hci_dev_get(A2MP_HCI_ID(id));

	if (hdev && hdev->amp_type != HCI_BREDR) {
		rsp.status = 0;
		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
	}

	send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);

	if (hdev)
		hci_dev_put(hdev);

	return 0;
}
477
478static void create_physical(struct l2cap_conn *conn, struct sock *sk)
479{
480 struct amp_mgr *mgr;
481 struct amp_ctx *ctx = NULL;
482
483 BT_DBG("conn %p", conn);
484 mgr = get_create_amp_mgr(conn, NULL);
485 if (!mgr)
486 goto cp_finished;
487 BT_DBG("mgr %p", mgr);
488 ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
489 if (!ctx)
490 goto cp_finished;
491 ctx->sk = sk;
492 sock_hold(sk);
493 start_ctx(mgr, ctx);
494 return;
495
496cp_finished:
497 l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
498}
499
500static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
501{
502 struct amp_mgr *mgr;
503 struct hci_dev *hdev;
504 struct hci_conn *conn;
505 struct amp_ctx *aplctx = NULL;
506 u8 remote_id = 0;
507 int result = -EINVAL;
508
509 BT_DBG("lcon %p", lcon);
510 mgr = get_create_amp_mgr(lcon, NULL);
511 if (!mgr)
512 goto ap_finished;
513 BT_DBG("mgr %p", mgr);
514 hdev = hci_dev_get(A2MP_HCI_ID(id));
515 if (!hdev)
516 goto ap_finished;
517 BT_DBG("hdev %p", hdev);
518 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
519 &mgr->l2cap_conn->hcon->dst);
520 if (conn) {
521 BT_DBG("conn %p", hdev);
522 result = 0;
523 remote_id = conn->dst_id;
524 goto ap_finished;
525 }
526 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
527 if (!aplctx)
528 goto ap_finished;
529 aplctx->sk = sk;
530 sock_hold(sk);
531 return;
532
533ap_finished:
534 l2cap_amp_physical_complete(result, id, remote_id, sk);
535}
536
537static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
538{
539 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
540 struct amp_ctx *ctx;
541 struct a2mp_getampassoc_req *req;
542
543 if (hdr->len < sizeof(*req))
544 return -EINVAL;
545 req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
546 skb_pull(skb, sizeof(*req));
547
548 ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
549 if (!ctx)
550 return -ENOMEM;
551 ctx->id = req->id;
552 ctx->d.gaa.req_ident = hdr->ident;
553 ctx->hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
554 if (ctx->hdev)
555 ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
556 GFP_ATOMIC);
557 start_ctx(mgr, ctx);
558 return 0;
559}
560
561static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
562{
563 struct sk_buff *skb = (struct sk_buff *) data;
564 struct hci_cp_read_local_amp_assoc cp;
565 struct hci_rp_read_local_amp_assoc *rp;
566 struct a2mp_getampassoc_rsp rsp;
567 u16 rem_len;
568 u16 frag_len;
569
570 rsp.status = 1;
571 if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
572 goto gaa_finished;
573
574 switch (ctx->state) {
575 case AMP_GAA_INIT:
576 ctx->state = AMP_GAA_RLAA_COMPLETE;
577 ctx->evt_type = AMP_HCI_CMD_CMPLT;
578 ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
579 ctx->d.gaa.len_so_far = 0;
580 cp.phy_handle = 0;
581 cp.len_so_far = 0;
582 cp.max_len = ctx->hdev->amp_assoc_size;
583 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
584 break;
585
586 case AMP_GAA_RLAA_COMPLETE:
587 if (skb->len < 4)
588 goto gaa_finished;
589 rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
590 if (rp->status)
591 goto gaa_finished;
592 rem_len = le16_to_cpu(rp->rem_len);
593 skb_pull(skb, 4);
594 frag_len = skb->len;
595
596 if (ctx->d.gaa.len_so_far + rem_len <=
597 ctx->hdev->amp_assoc_size) {
598 struct hci_cp_read_local_amp_assoc cp;
599 u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
600 memcpy(assoc, rp->frag, frag_len);
601 ctx->d.gaa.len_so_far += rem_len;
602 rem_len -= frag_len;
603 if (rem_len == 0) {
604 rsp.status = 0;
605 goto gaa_finished;
606 }
607 /* more assoc data to read */
608 cp.phy_handle = 0;
609 cp.len_so_far = ctx->d.gaa.len_so_far;
610 cp.max_len = ctx->hdev->amp_assoc_size;
611 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
612 }
613 break;
614
615 default:
616 goto gaa_finished;
617 break;
618 }
619 return 0;
620
621gaa_finished:
622 rsp.id = ctx->id;
623 send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
624 sizeof(rsp), &rsp,
625 ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
626 kfree(ctx->d.gaa.assoc);
627 if (ctx->hdev)
628 hci_dev_put(ctx->hdev);
629 return 1;
630}
631
/* Completion bookkeeping for an asynchronous hmac(sha256) request. */
struct hmac_sha256_result {
	struct completion completion;
	int err;	/* final status reported by the crypto callback */
};
636
637static void hmac_sha256_final(struct crypto_async_request *req, int err)
638{
639 struct hmac_sha256_result *r = req->data;
640 if (err == -EINPROGRESS)
641 return;
642 r->err = err;
643 complete(&r->completion);
644}
645
646int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
647 u8 *output, u8 outlen)
648{
649 int ret = 0;
650 struct crypto_ahash *tfm;
651 struct scatterlist sg;
652 struct ahash_request *req;
653 struct hmac_sha256_result tresult;
654 void *hash_buff = NULL;
655
656 unsigned char hash_result[64];
657 int i;
658
659 memset(output, 0, outlen);
660
661 init_completion(&tresult.completion);
662
663 tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
664 CRYPTO_ALG_TYPE_AHASH_MASK);
665 if (IS_ERR(tfm)) {
666 BT_DBG("crypto_alloc_ahash failed");
667 ret = PTR_ERR(tfm);
668 goto err_tfm;
669 }
670
671 req = ahash_request_alloc(tfm, GFP_KERNEL);
672 if (!req) {
673 BT_DBG("failed to allocate request for hmac(sha256)");
674 ret = -ENOMEM;
675 goto err_req;
676 }
677
678 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
679 hmac_sha256_final, &tresult);
680
681 hash_buff = kzalloc(psize, GFP_KERNEL);
682 if (!hash_buff) {
683 BT_DBG("failed to kzalloc hash_buff");
684 ret = -ENOMEM;
685 goto err_hash_buf;
686 }
687
688 memset(hash_result, 0, 64);
689 memcpy(hash_buff, plaintext, psize);
690 sg_init_one(&sg, hash_buff, psize);
691
692 if (ksize) {
693 crypto_ahash_clear_flags(tfm, ~0);
694 ret = crypto_ahash_setkey(tfm, key, ksize);
695
696 if (ret) {
697 BT_DBG("crypto_ahash_setkey failed");
698 goto err_setkey;
699 }
700 }
701
702 ahash_request_set_crypt(req, &sg, hash_result, psize);
703 ret = crypto_ahash_digest(req);
704
705 BT_DBG("ret 0x%x", ret);
706
707 switch (ret) {
708 case 0:
709 for (i = 0; i < outlen; i++)
710 output[i] = hash_result[i];
711 break;
712 case -EINPROGRESS:
713 case -EBUSY:
714 ret = wait_for_completion_interruptible(&tresult.completion);
715 if (!ret && !tresult.err) {
716 INIT_COMPLETION(tresult.completion);
717 break;
718 } else {
719 BT_DBG("wait_for_completion_interruptible failed");
720 if (!ret)
721 ret = tresult.err;
722 goto out;
723 }
724 default:
725 goto out;
726 }
727
728out:
729err_setkey:
730 kfree(hash_buff);
731err_hash_buf:
732 ahash_request_free(req);
733err_req:
734 crypto_free_ahash(tfm);
735err_tfm:
736 return ret;
737}
738
739static void show_key(u8 *k)
740{
741 int i = 0;
742 for (i = 0; i < 32; i += 8)
743 BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
744 *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
745 *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
746}
747
748static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
749{
750 u8 bt2_key[32];
751 u8 gamp_key[32];
752 u8 b802_key[32];
753 int result;
754
755 if (!hci_conn_check_link_mode(conn))
756 return -EACCES;
757
758 BT_DBG("key_type %d", conn->key_type);
759 if (conn->key_type < 3)
760 return -EACCES;
761
762 *type = conn->key_type;
763 *len = 32;
764 memcpy(&bt2_key[0], conn->link_key, 16);
765 memcpy(&bt2_key[16], conn->link_key, 16);
766 result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
767 if (result)
768 goto ps_finished;
769
770 if (conn->key_type == 3) {
771 BT_DBG("gamp_key");
772 show_key(gamp_key);
773 memcpy(data, gamp_key, 32);
774 goto ps_finished;
775 }
776
777 result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
778 if (result)
779 goto ps_finished;
780
781 BT_DBG("802b_key");
782 show_key(b802_key);
783 memcpy(data, b802_key, 32);
784
785ps_finished:
786 return result;
787}
788
789static u8 amp_next_handle;
790static inline u8 physlink_handle(struct hci_dev *hdev)
791{
792 /* TODO amp_next_handle should be part of hci_dev */
793 if (amp_next_handle == 0)
794 amp_next_handle = 1;
795 return amp_next_handle++;
796}
797
798/* Start an Accept Physical Link sequence */
799static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
800{
801 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
802 struct amp_ctx *ctx = NULL;
803 struct a2mp_createphyslink_req *req;
804
805 if (hdr->len < sizeof(*req))
806 return -EINVAL;
807 req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
808 skb_pull(skb, sizeof(*req));
809 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
810
811 /* initialize the context */
812 ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
813 if (!ctx)
814 return -ENOMEM;
815 ctx->d.apl.req_ident = hdr->ident;
816 ctx->d.apl.remote_id = req->local_id;
817 ctx->id = req->remote_id;
818
819 /* add the supplied remote assoc to the context */
820 ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
821 if (ctx->d.apl.remote_assoc)
822 memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
823 ctx->d.apl.len_so_far = 0;
824 ctx->d.apl.rem_len = skb->len;
825 skb_pull(skb, skb->len);
826 ctx->hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
827 start_ctx(mgr, ctx);
828 return 0;
829}
830
/* State machine for accepting a peer-initiated physical link:
 *
 *   APL_INIT          resolve collisions with concurrent create/accept
 *                     contexts, derive the link key, issue HCI Accept
 *                     Physical Link
 *   APL_APL_STATUS    command accepted by the PAL: send the A2MP
 *                     response and start writing the remote assoc
 *   APL_WRA_COMPLETE  write next assoc fragment (248 bytes max per
 *                     HCI command) until none remain
 *   APL_PL_COMPLETE   physical link is up: record the remote id/addr
 *
 * Returns nonzero when the context is finished and must be destroyed;
 * the exit path notifies L2CAP and (on error before completion) sends
 * a failure response to the peer.
 */
static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct sk_buff *skb = data;
	struct hci_cp_accept_phys_link acp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_cmd_status *cs = data;
	struct hci_ev_phys_link_complete *ev;
	struct a2mp_createphyslink_rsp rsp;
	struct amp_ctx *cplctx;
	struct amp_ctx *aplctx;
	u16 frag_len;
	struct hci_conn *conn;
	int result;

	BT_DBG("state %d", ctx->state);
	result = -EINVAL;
	rsp.status = 1;	/* Invalid Controller ID */
	if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
		goto apl_finished;
	if (evt_type == AMP_KILLED) {
		result = -EAGAIN;
		rsp.status = 4;	/* Disconnect request received */
		goto apl_finished;
	}
	if (!ctx->d.apl.remote_assoc) {
		result = -ENOMEM;
		rsp.status = 2;	/* Unable to Start */
		goto apl_finished;
	}

	switch (ctx->state) {
	case AMP_APL_INIT:
		BT_DBG("local_id %d, remote_id %d",
			ctx->id, ctx->d.apl.remote_id);
		/* a physical link to this remote already exists? */
		conn = hci_conn_hash_lookup_id(ctx->hdev,
			&ctx->mgr->l2cap_conn->hcon->dst,
			ctx->d.apl.remote_id);
		if (conn) {
			result = -EEXIST;
			rsp.status = 5;	/* Already Exists */
			goto apl_finished;
		}

		/* serialize behind another accept for the same remote */
		aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
		if ((aplctx) &&
			(aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			BT_DBG("deferred to %p", aplctx);
			aplctx->deferred = ctx;
			break;
		}

		/* collision with our own outgoing create: the lower
		 * BD_ADDR wins (per the A2MP collision rule) */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if ((cplctx) &&
			(cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
			BT_DBG("local %s remote %s",
				batostr(&bcon->hdev->bdaddr),
				batostr(&bcon->dst));
			if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
				(bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
				BT_DBG("COLLISION LOSER");
				cplctx->deferred = ctx;
				cancel_ctx(cplctx);
				break;
			} else {
				BT_DBG("COLLISION WINNER");
				result = -EISCONN;
				rsp.status = 3;	/* Collision */
				goto apl_finished;
			}
		}

		result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
						&acp.key_len, &acp.type);
		if (result) {
			BT_DBG("SECURITY");
			rsp.status = 6;	/* Security Violation */
			goto apl_finished;
		}

		ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
		ctx->state = AMP_APL_APL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
		acp.phy_handle = ctx->d.apl.phy_handle;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
		break;

	case AMP_APL_APL_STATUS:
		if (cs->status != 0)
			goto apl_finished;
		/* PAL will accept link, send a2mp response */
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		rsp.status = 0;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);

		/* send the first assoc fragment */
		wcp.phy_handle = ctx->d.apl.phy_handle;
		wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
		wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
		ctx->state = AMP_APL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = fixed part of the write-assoc command parameters */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_APL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto apl_finished;
		if (wrp->phy_handle != ctx->d.apl.phy_handle)
			goto apl_finished;
		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		ctx->d.apl.len_so_far += frag_len;
		ctx->d.apl.rem_len -= frag_len;
		if (ctx->d.apl.rem_len > 0) {
			u8 *assoc;
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.apl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
			assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
			memcpy(wcp.frag, assoc, frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* wait for physical link complete event */
		ctx->state = AMP_APL_PL_COMPLETE;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
		break;

	case AMP_APL_PL_COMPLETE:
		/* physical link complete event received */
		if (skb->len < sizeof(*ev))
			goto apl_finished;
		ev = (struct hci_ev_phys_link_complete *) skb->data;
		if (ev->phy_handle != ctx->d.apl.phy_handle)
			break;
		if (ev->status != 0)
			goto apl_finished;
		conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
		if (!conn)
			goto apl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
		conn->dst_id = ctx->d.apl.remote_id;
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		goto apl_finished;
		break;

	default:
		goto apl_finished;
		break;
	}
	return 0;

apl_finished:
	if (ctx->sk)
		l2cap_amp_physical_complete(result, ctx->id,
					ctx->d.apl.remote_id, ctx->sk);
	/* only send a failure response while the peer is still waiting */
	if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
	}
	kfree(ctx->d.apl.remote_assoc);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	return 1;
}
1013
/* Abort an in-flight Create Physical Link: move the context to the
 * cancel state (expecting the disconnect-complete event next) and ask
 * the controller to tear the physical link down with @reason.
 */
static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
{
	struct hci_cp_disconn_phys_link dcp;

	ctx->state = AMP_CPL_PL_CANCEL;
	ctx->evt_type = AMP_HCI_EVENT;
	ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
	dcp.phy_handle = ctx->d.cpl.phy_handle;
	dcp.reason = reason;
	hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
}
1025
1026static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
1027{
1028 struct amp_ctrl *ctrl;
1029 struct sk_buff *skb = data;
1030 struct a2mp_cmd_hdr *hdr;
1031 struct hci_ev_cmd_status *cs = data;
1032 struct amp_ctx *cplctx;
1033 struct a2mp_discover_req dreq;
1034 struct a2mp_discover_rsp *drsp;
1035 u16 *efm;
1036 struct a2mp_getinfo_req greq;
1037 struct a2mp_getinfo_rsp *grsp;
1038 struct a2mp_cl *cl;
1039 struct a2mp_getampassoc_req areq;
1040 struct a2mp_getampassoc_rsp *arsp;
1041 struct hci_cp_create_phys_link cp;
1042 struct hci_cp_write_remote_amp_assoc wcp;
1043 struct hci_rp_write_remote_amp_assoc *wrp;
1044 struct hci_ev_channel_selected *cev;
1045 struct hci_cp_read_local_amp_assoc rcp;
1046 struct hci_rp_read_local_amp_assoc *rrp;
1047 struct a2mp_createphyslink_req creq;
1048 struct a2mp_createphyslink_rsp *crsp;
1049 struct hci_ev_phys_link_complete *pev;
1050 struct hci_ev_disconn_phys_link_complete *dev;
1051 u8 *assoc, *rassoc, *lassoc;
1052 u16 frag_len;
1053 u16 rem_len;
1054 int result = -EAGAIN;
1055 struct hci_conn *conn;
1056
1057 BT_DBG("state %d", ctx->state);
1058 if (evt_type == AMP_KILLED)
1059 goto cpl_finished;
1060
1061 if (evt_type == AMP_CANCEL) {
1062 if ((ctx->state < AMP_CPL_CPL_STATUS) ||
1063 ((ctx->state == AMP_CPL_PL_COMPLETE) &&
1064 !(ctx->evt_type & AMP_HCI_EVENT)))
1065 goto cpl_finished;
1066
1067 cancel_cpl_ctx(ctx, 0x16);
1068 return 0;
1069 }
1070
1071 switch (ctx->state) {
1072 case AMP_CPL_INIT:
1073 cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
1074 if (cplctx) {
1075 BT_DBG("deferred to %p", cplctx);
1076 cplctx->deferred = ctx;
1077 break;
1078 }
1079 ctx->state = AMP_CPL_DISC_RSP;
1080 ctx->evt_type = AMP_A2MP_RSP;
1081 ctx->rsp_ident = next_ident(ctx->mgr);
1082 dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
1083 dreq.ext_feat = 0;
1084 send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
1085 sizeof(dreq), &dreq);
1086 break;
1087
1088 case AMP_CPL_DISC_RSP:
1089 drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
1090 if (skb->len < (sizeof(*drsp))) {
1091 result = -EINVAL;
1092 goto cpl_finished;
1093 }
1094
1095 efm = (u16 *) skb_pull(skb, sizeof(*drsp));
1096 BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
1097 le16_to_cpu(drsp->ext_feat));
1098
1099 while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
1100 if (skb->len < sizeof(*efm)) {
1101 result = -EINVAL;
1102 goto cpl_finished;
1103 }
1104 drsp->ext_feat = *efm;
1105 BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
1106 efm = (u16 *) skb_pull(skb, sizeof(*efm));
1107 }
1108 cl = (struct a2mp_cl *) efm;
1109
1110 /* find the first remote and local controller with the
1111 * same type
1112 */
1113 greq.id = 0;
1114 result = -ENODEV;
1115 while (skb->len >= sizeof(*cl)) {
1116 if ((cl->id != 0) && (greq.id == 0)) {
1117 struct hci_dev *hdev;
1118 hdev = hci_dev_get_type(cl->type);
1119 if (hdev) {
1120 struct hci_conn *conn;
1121 ctx->hdev = hdev;
1122 ctx->id = HCI_A2MP_ID(hdev->id);
1123 ctx->d.cpl.remote_id = cl->id;
1124 conn = hci_conn_hash_lookup_ba(hdev,
1125 ACL_LINK,
1126 &ctx->mgr->l2cap_conn->hcon->dst);
1127 if (conn) {
1128 BT_DBG("PL_COMPLETE exists %x",
1129 (int) conn->handle);
1130 result = 0;
1131 }
1132 ctrl = get_create_ctrl(ctx->mgr,
1133 cl->id);
1134 if (ctrl) {
1135 ctrl->type = cl->type;
1136 ctrl->status = cl->status;
1137 }
1138 greq.id = cl->id;
1139 }
1140 }
1141 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
1142 }
1143 if ((!greq.id) || (!result))
1144 goto cpl_finished;
1145 ctx->state = AMP_CPL_GETINFO_RSP;
1146 ctx->evt_type = AMP_A2MP_RSP;
1147 ctx->rsp_ident = next_ident(ctx->mgr);
1148 send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
1149 sizeof(greq), &greq);
1150 break;
1151
1152 case AMP_CPL_GETINFO_RSP:
1153 if (skb->len < sizeof(*grsp))
1154 goto cpl_finished;
1155 grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
1156 if (grsp->status)
1157 goto cpl_finished;
1158 if (grsp->id != ctx->d.cpl.remote_id)
1159 goto cpl_finished;
1160 ctrl = get_ctrl(ctx->mgr, grsp->id);
1161 if (!ctrl)
1162 goto cpl_finished;
1163 ctrl->status = grsp->status;
1164 ctrl->total_bw = le32_to_cpu(grsp->total_bw);
1165 ctrl->max_bw = le32_to_cpu(grsp->max_bw);
1166 ctrl->min_latency = le32_to_cpu(grsp->min_latency);
1167 ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
1168 ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
1169 skb_pull(skb, sizeof(*grsp));
1170
1171 ctx->d.cpl.max_len = ctrl->max_assoc_size;
1172
1173 /* setup up GAA request */
1174 areq.id = ctx->d.cpl.remote_id;
1175
1176 /* advance context state */
1177 ctx->state = AMP_CPL_GAA_RSP;
1178 ctx->evt_type = AMP_A2MP_RSP;
1179 ctx->rsp_ident = next_ident(ctx->mgr);
1180 send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
1181 sizeof(areq), &areq);
1182 break;
1183
1184 case AMP_CPL_GAA_RSP:
1185 if (skb->len < sizeof(*arsp))
1186 goto cpl_finished;
1187 hdr = (void *) skb->data;
1188 arsp = (void *) skb_pull(skb, sizeof(*hdr));
1189 if (arsp->id != ctx->d.cpl.remote_id)
1190 goto cpl_finished;
1191 if (arsp->status != 0)
1192 goto cpl_finished;
1193
1194 /* store away remote assoc */
1195 assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
1196 ctx->d.cpl.len_so_far = 0;
1197 ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
1198 rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
1199 if (!rassoc)
1200 goto cpl_finished;
1201 memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
1202 ctx->d.cpl.remote_assoc = rassoc;
1203 skb_pull(skb, ctx->d.cpl.rem_len);
1204
1205 /* set up CPL command */
1206 ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
1207 cp.phy_handle = ctx->d.cpl.phy_handle;
1208 if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
1209 &cp.key_len, &cp.type)) {
1210 result = -EPERM;
1211 goto cpl_finished;
1212 }
1213
1214 /* advance context state */
1215 ctx->state = AMP_CPL_CPL_STATUS;
1216 ctx->evt_type = AMP_HCI_CMD_STATUS;
1217 ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
1218 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
1219 break;
1220
1221 case AMP_CPL_CPL_STATUS:
1222 /* received create physical link command status */
1223 if (cs->status != 0)
1224 goto cpl_finished;
1225 /* send the first assoc fragment */
1226 wcp.phy_handle = ctx->d.cpl.phy_handle;
1227 wcp.len_so_far = ctx->d.cpl.len_so_far;
1228 wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
1229 frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
1230 memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
1231 ctx->state = AMP_CPL_WRA_COMPLETE;
1232 ctx->evt_type = AMP_HCI_CMD_CMPLT;
1233 ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
1234 hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
1235 break;
1236
1237 case AMP_CPL_WRA_COMPLETE:
1238 /* received write remote amp assoc command complete event */
1239 if (skb->len < sizeof(*wrp))
1240 goto cpl_finished;
1241 wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
1242 if (wrp->status != 0)
1243 goto cpl_finished;
1244 if (wrp->phy_handle != ctx->d.cpl.phy_handle)
1245 goto cpl_finished;
1246
1247 /* update progress */
1248 frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
1249 ctx->d.cpl.len_so_far += frag_len;
1250 ctx->d.cpl.rem_len -= frag_len;
1251 if (ctx->d.cpl.rem_len > 0) {
1252 /* another assoc fragment to send */
1253 wcp.phy_handle = ctx->d.cpl.phy_handle;
1254 wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
1255 wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
1256 frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
1257 memcpy(wcp.frag,
1258 ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
1259 frag_len);
1260 hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
1261 break;
1262 }
1263 /* now wait for channel selected event */
1264 ctx->state = AMP_CPL_CHANNEL_SELECT;
1265 ctx->evt_type = AMP_HCI_EVENT;
1266 ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
1267 break;
1268
1269 case AMP_CPL_CHANNEL_SELECT:
1270 /* received channel selection event */
1271 if (skb->len < sizeof(*cev))
1272 goto cpl_finished;
1273 cev = (void *) skb->data;
1274/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
1275 Create Physical Link collision scenario
1276 if (cev->phy_handle != ctx->d.cpl.phy_handle)
1277 goto cpl_finished;
1278*/
1279
1280 /* request the first local assoc fragment */
1281 rcp.phy_handle = ctx->d.cpl.phy_handle;
1282 rcp.len_so_far = 0;
1283 rcp.max_len = ctx->d.cpl.max_len;
1284 lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
1285 if (!lassoc)
1286 goto cpl_finished;
1287 ctx->d.cpl.local_assoc = lassoc;
1288 ctx->d.cpl.len_so_far = 0;
1289 ctx->state = AMP_CPL_RLA_COMPLETE;
1290 ctx->evt_type = AMP_HCI_CMD_CMPLT;
1291 ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
1292 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
1293 break;
1294
1295 case AMP_CPL_RLA_COMPLETE:
1296 /* received read local amp assoc command complete event */
1297 if (skb->len < 4)
1298 goto cpl_finished;
1299 rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
1300 if (rrp->status)
1301 goto cpl_finished;
1302 if (rrp->phy_handle != ctx->d.cpl.phy_handle)
1303 goto cpl_finished;
1304 rem_len = le16_to_cpu(rrp->rem_len);
1305 skb_pull(skb, 4);
1306 frag_len = skb->len;
1307
1308 if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
1309 goto cpl_finished;
1310
1311 /* save this fragment in context */
1312 lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
1313 memcpy(lassoc, rrp->frag, frag_len);
1314 ctx->d.cpl.len_so_far += frag_len;
1315 rem_len -= frag_len;
1316 if (rem_len > 0) {
1317 /* request another local assoc fragment */
1318 rcp.phy_handle = ctx->d.cpl.phy_handle;
1319 rcp.len_so_far = ctx->d.cpl.len_so_far;
1320 rcp.max_len = ctx->d.cpl.max_len;
1321 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
1322 } else {
1323 creq.local_id = ctx->id;
1324 creq.remote_id = ctx->d.cpl.remote_id;
1325 /* wait for A2MP rsp AND phys link complete event */
1326 ctx->state = AMP_CPL_PL_COMPLETE;
1327 ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
1328 ctx->rsp_ident = next_ident(ctx->mgr);
1329 ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
1330 send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
1331 A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
1332 ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
1333 }
1334 break;
1335
1336 case AMP_CPL_PL_COMPLETE:
1337 if (evt_type == AMP_A2MP_RSP) {
1338 /* create physical link response received */
1339 ctx->evt_type &= ~AMP_A2MP_RSP;
1340 if (skb->len < sizeof(*crsp))
1341 goto cpl_finished;
1342 crsp = (void *) skb_pull(skb, sizeof(*hdr));
1343 if ((crsp->local_id != ctx->d.cpl.remote_id) ||
1344 (crsp->remote_id != ctx->id) ||
1345 (crsp->status != 0)) {
1346 cancel_cpl_ctx(ctx, 0x13);
1347 break;
1348 }
1349
1350 /* notify Qualcomm PAL */
1351 if (ctx->hdev->manufacturer == 0x001d)
1352 hci_send_cmd(ctx->hdev,
1353 hci_opcode_pack(0x3f, 0x00), 0, NULL);
1354 }
1355 if (evt_type == AMP_HCI_EVENT) {
1356 ctx->evt_type &= ~AMP_HCI_EVENT;
1357 /* physical link complete event received */
1358 if (skb->len < sizeof(*pev))
1359 goto cpl_finished;
1360 pev = (void *) skb->data;
1361 if (pev->phy_handle != ctx->d.cpl.phy_handle)
1362 break;
1363 if (pev->status != 0)
1364 goto cpl_finished;
1365 }
1366 if (ctx->evt_type)
1367 break;
1368 conn = hci_conn_hash_lookup_handle(ctx->hdev,
1369 ctx->d.cpl.phy_handle);
1370 if (!conn)
1371 goto cpl_finished;
1372 result = 0;
1373 BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
1374 bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
1375 conn->dst_id = ctx->d.cpl.remote_id;
1376 conn->out = 1;
1377 goto cpl_finished;
1378 break;
1379
1380 case AMP_CPL_PL_CANCEL:
1381 dev = (void *) skb->data;
1382 BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
1383 result = -EISCONN;
1384 goto cpl_finished;
1385 break;
1386
1387 default:
1388 goto cpl_finished;
1389 break;
1390 }
1391 return 0;
1392
1393cpl_finished:
1394 l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
1395 ctx->sk);
1396 if (ctx->sk)
1397 sock_put(ctx->sk);
1398 if (ctx->hdev)
1399 hci_dev_put(ctx->hdev);
1400 kfree(ctx->d.cpl.remote_assoc);
1401 kfree(ctx->d.cpl.local_assoc);
1402 return 1;
1403}
1404
/*
 * Handle an incoming A2MP Disconnect Physical Link Request.
 *
 * Validates the PDU, looks up the local controller named by the request
 * and tears down the physical link to the requesting peer (or kills a
 * still-pending accept-physical-link context).  A Disconnect Physical
 * Link Response is always sent except when the PDU is too short.
 *
 * Returns 0 when a response was sent, -EINVAL on a malformed PDU.
 */
static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (void *) skb->data;
	struct a2mp_disconnphyslink_req *req;
	struct a2mp_disconnphyslink_rsp rsp;
	struct hci_dev *hdev;
	struct hci_conn *conn;
	struct amp_ctx *aplctx;

	BT_DBG("mgr %p skb %p", mgr, skb);
	if (hdr->len < sizeof(*req))
		return -EINVAL;
	req = (void *) skb_pull(skb, sizeof(*hdr));
	skb_pull(skb, sizeof(*req));

	/* Roles are mirrored in the response: our local id is the peer's
	 * remote id and vice versa.
	 */
	rsp.local_id = req->remote_id;
	rsp.remote_id = req->local_id;
	rsp.status = 0;
	BT_DBG("local_id %d remote_id %d",
		(int) rsp.local_id, (int) rsp.remote_id);
	hdev = hci_dev_get(A2MP_HCI_ID(rsp.local_id));
	if (!hdev) {
		rsp.status = 1; /* Invalid Controller ID */
		goto dpl_finished;
	}
	BT_DBG("hdev %p", hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					&mgr->l2cap_conn->hcon->dst);
	if (!conn) {
		/* No established link yet - an accept-physical-link
		 * context may still be in flight; kill it instead.
		 */
		aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
		if (aplctx) {
			kill_ctx(aplctx);
			rsp.status = 0;
			goto dpl_finished;
		}
		rsp.status = 2; /* No Physical Link exists */
		goto dpl_finished;
	}
	BT_DBG("conn %p", conn);
	hci_disconnect(conn, 0x13); /* 0x13: remote user terminated */

dpl_finished:
	send_a2mp_cmd(mgr, hdr->ident,
			A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
	if (hdev)
		hci_dev_put(hdev);
	return 0;
}
1453
1454static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
1455{
1456 struct amp_mgr *mgr = ctx->mgr;
1457 u8 finished = 0;
1458
1459 if (!mgr->connected)
1460 return 0;
1461
1462 switch (ctx->type) {
1463 case AMP_GETAMPASSOC:
1464 finished = getampassoc_handler(ctx, evt_type, data);
1465 break;
1466 case AMP_CREATEPHYSLINK:
1467 finished = createphyslink_handler(ctx, evt_type, data);
1468 break;
1469 case AMP_ACCEPTPHYSLINK:
1470 finished = acceptphyslink_handler(ctx, evt_type, data);
1471 break;
1472 }
1473
1474 if (!finished)
1475 mod_timer(&(ctx->timer), jiffies +
1476 msecs_to_jiffies(A2MP_RSP_TIMEOUT));
1477 else
1478 destroy_ctx(ctx);
1479 return finished;
1480}
1481
1482static int cancel_ctx(struct amp_ctx *ctx)
1483{
1484 return execute_ctx(ctx, AMP_CANCEL, 0);
1485}
1486
1487static int kill_ctx(struct amp_ctx *ctx)
1488{
1489 return execute_ctx(ctx, AMP_KILLED, 0);
1490}
1491
1492static void ctx_timeout_worker(struct work_struct *w)
1493{
1494 struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
1495 struct amp_ctx *ctx = work->ctx;
1496 kill_ctx(ctx);
1497 kfree(work);
1498}
1499
1500static void ctx_timeout(unsigned long data)
1501{
1502 struct amp_ctx *ctx = (struct amp_ctx *) data;
1503 struct amp_work_ctx_timeout *work;
1504
1505 BT_DBG("ctx %p", ctx);
1506 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1507 if (work) {
1508 INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
1509 work->ctx = ctx;
1510 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1511 kfree(work);
1512 }
1513}
1514
/*
 * Kick off the first queued context for this manager, if any.  Called
 * once the A2MP channel reports connected.
 *
 * NOTE(review): the context pointer is used after the list lock is
 * dropped - presumably safe because contexts are created/destroyed
 * from the same single-threaded workqueue; confirm.
 */
static void launch_ctx(struct amp_mgr *mgr)
{
	struct amp_ctx *ctx = NULL;

	BT_DBG("mgr %p", mgr);
	read_lock_bh(&mgr->ctx_list_lock);
	if (!list_empty(&mgr->ctx_list))
		ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
	read_unlock_bh(&mgr->ctx_list_lock);
	BT_DBG("ctx %p", ctx);
	if (ctx)
		execute_ctx(ctx, AMP_INIT, NULL);
}
1528
1529static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
1530{
1531 struct amp_ctx *ctx;
1532 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1533 u16 hdr_len = le16_to_cpu(hdr->len);
1534
1535 /* find context waiting for A2MP rsp with this rsp's identifier */
1536 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
1537 ctx = get_ctx_a2mp(mgr, hdr->ident);
1538 if (ctx) {
1539 execute_ctx(ctx, AMP_A2MP_RSP, skb);
1540 } else {
1541 BT_DBG("context not found");
1542 skb_pull(skb, sizeof(*hdr));
1543 if (hdr_len > skb->len)
1544 hdr_len = skb->len;
1545 skb_pull(skb, hdr_len);
1546 }
1547 return 0;
1548}
1549
1550/* L2CAP-A2MP interface */
1551
/*
 * Entry point for A2MP PDUs arriving on the fixed-channel socket.
 *
 * Parses as many complete command/response PDUs as the skb holds and
 * dispatches each to its handler.  On a parse or handler error an A2MP
 * Command Reject is returned to the peer.
 */
void a2mp_receive(struct sock *sk, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	int len;
	int err = 0;
	struct amp_mgr *mgr;

	mgr = get_amp_mgr_sk(sk);
	if (!mgr)
		goto a2mp_finished;

	len = skb->len;
	while (len >= sizeof(*hdr)) {
		/* shadows the outer hdr, which stays pointing at the
		 * first command in the skb
		 */
		struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
		u16 clen = le16_to_cpu(hdr->len);

		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
		if (clen > len || !hdr->ident) {
			err = -EINVAL;
			break;
		}
		/* NOTE(review): loop progress relies on every handler
		 * pulling its PDU from the skb on all paths - confirm,
		 * otherwise a non-consuming handler loops forever here.
		 */
		switch (hdr->code) {
		case A2MP_COMMAND_REJ:
			command_rej(mgr, skb);
			break;
		case A2MP_DISCOVER_REQ:
			err = discover_req(mgr, skb);
			break;
		case A2MP_CHANGE_NOTIFY:
			err = change_notify(mgr, skb);
			break;
		case A2MP_GETINFO_REQ:
			err = getinfo_req(mgr, skb);
			break;
		case A2MP_GETAMPASSOC_REQ:
			err = getampassoc_req(mgr, skb);
			break;
		case A2MP_CREATEPHYSLINK_REQ:
			err = createphyslink_req(mgr, skb);
			break;
		case A2MP_DISCONNPHYSLINK_REQ:
			err = disconnphyslink_req(mgr, skb);
			break;
		case A2MP_CHANGE_RSP:
		case A2MP_DISCOVER_RSP:
		case A2MP_GETINFO_RSP:
		case A2MP_GETAMPASSOC_RSP:
		case A2MP_CREATEPHYSLINK_RSP:
		case A2MP_DISCONNPHYSLINK_RSP:
			err = a2mp_rsp(mgr, skb);
			break;
		default:
			BT_ERR("Unknown A2MP signaling command 0x%2.2x",
				hdr->code);
			skb_pull(skb, sizeof(*hdr));
			err = -EINVAL;
			break;
		}
		len = skb->len;
	}

a2mp_finished:
	/* NOTE(review): the reject echoes the ident of the *first*
	 * command in the skb (outer hdr), not necessarily the one that
	 * failed - confirm this is the intended behavior.
	 */
	if (err && mgr) {
		struct a2mp_cmd_rej rej;
		rej.reason = cpu_to_le16(0);
		send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
				sizeof(rej), &rej);
	}
}
1621
1622/* L2CAP-A2MP interface */
1623
1624static int send_a2mp(struct socket *sock, u8 *data, int len)
1625{
1626 struct kvec iv = { data, len };
1627 struct msghdr msg;
1628
1629 memset(&msg, 0, sizeof(msg));
1630
1631 return kernel_sendmsg(sock, &msg, &iv, 1, len);
1632}
1633
1634static void data_ready_worker(struct work_struct *w)
1635{
1636 struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
1637 struct sock *sk = work->sk;
1638 struct sk_buff *skb;
1639
1640 /* skb_dequeue() is thread-safe */
1641 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1642 a2mp_receive(sk, skb);
1643 kfree_skb(skb);
1644 }
1645 sock_put(work->sk);
1646 kfree(work);
1647}
1648
1649static void data_ready(struct sock *sk, int bytes)
1650{
1651 struct amp_work_data_ready *work;
1652 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1653 if (work) {
1654 INIT_WORK((struct work_struct *) work, data_ready_worker);
1655 sock_hold(sk);
1656 work->sk = sk;
1657 work->bytes = bytes;
1658 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1659 kfree(work);
1660 sock_put(sk);
1661 }
1662 }
1663}
1664
/*
 * Workqueue body for socket state changes on the A2MP channel.
 *
 * BT_CONNECTED: mark the manager up, replay any frame deferred while
 * connecting, and launch the first queued context.
 * BT_CLOSED: release the socket (only if not already dead) and tear
 * down the manager.
 */
static void state_change_worker(struct work_struct *w)
{
	struct amp_work_state_change *work = (struct amp_work_state_change *) w;
	struct amp_mgr *mgr;
	switch (work->sk->sk_state) {
	case BT_CONNECTED:
		/* socket is up */
		BT_DBG("CONNECTED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			mgr->connected = 1;
			/* a frame that arrived before the channel was up
			 * was stashed on the manager - deliver it now
			 */
			if (mgr->skb) {
				l2cap_recv_deferred_frame(work->sk, mgr->skb);
				mgr->skb = NULL;
			}
			launch_ctx(mgr);
		}
		break;

	case BT_CLOSED:
		/* connection is gone */
		BT_DBG("CLOSED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			/* NOTE(review): a2mp_sock is cleared even when
			 * SOCK_DEAD prevented the release - presumably
			 * the core frees a dead socket itself; confirm.
			 */
			if (!sock_flag(work->sk, SOCK_DEAD))
				sock_release(mgr->a2mp_sock);
			mgr->a2mp_sock = NULL;
			remove_amp_mgr(mgr);
		}
		break;

	default:
		/* something else happened */
		break;
	}
	sock_put(work->sk);
	kfree(work);
}
1703
1704static void state_change(struct sock *sk)
1705{
1706 struct amp_work_state_change *work;
1707 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1708 if (work) {
1709 INIT_WORK((struct work_struct *) work, state_change_worker);
1710 sock_hold(sk);
1711 work->sk = sk;
1712 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1713 kfree(work);
1714 sock_put(sk);
1715 }
1716 }
1717}
1718
1719static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
1720{
1721 int err;
1722 struct socket *sock;
1723 struct sockaddr_l2 addr;
1724 struct sock *sk;
1725 struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
1726 L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
1727 L2CAP_MODE_ERTM, 1, 0xFF, 1};
1728
1729
1730 err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
1731 BTPROTO_L2CAP, &sock);
1732
1733 if (err) {
1734 BT_ERR("sock_create_kern failed %d", err);
1735 return NULL;
1736 }
1737
1738 sk = sock->sk;
1739 sk->sk_data_ready = data_ready;
1740 sk->sk_state_change = state_change;
1741
1742 memset(&addr, 0, sizeof(addr));
1743 bacpy(&addr.l2_bdaddr, src);
1744 addr.l2_family = AF_BLUETOOTH;
1745 addr.l2_cid = L2CAP_CID_A2MP;
1746 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
1747 if (err) {
1748 BT_ERR("kernel_bind failed %d", err);
1749 sock_release(sock);
1750 return NULL;
1751 }
1752
1753 l2cap_fixed_channel_config(sk, &opts);
1754
1755 memset(&addr, 0, sizeof(addr));
1756 bacpy(&addr.l2_bdaddr, dst);
1757 addr.l2_family = AF_BLUETOOTH;
1758 addr.l2_cid = L2CAP_CID_A2MP;
1759 err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
1760 O_NONBLOCK);
1761 if ((err == 0) || (err == -EINPROGRESS))
1762 return sock;
1763 else {
1764 BT_ERR("kernel_connect failed %d", err);
1765 sock_release(sock);
1766 return NULL;
1767 }
1768}
1769
1770static void conn_ind_worker(struct work_struct *w)
1771{
1772 struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
1773 struct l2cap_conn *conn = work->conn;
1774 struct sk_buff *skb = work->skb;
1775 struct amp_mgr *mgr;
1776
1777 mgr = get_create_amp_mgr(conn, skb);
1778 BT_DBG("mgr %p", mgr);
1779 kfree(work);
1780}
1781
1782static void create_physical_worker(struct work_struct *w)
1783{
1784 struct amp_work_create_physical *work =
1785 (struct amp_work_create_physical *) w;
1786
1787 create_physical(work->conn, work->sk);
1788 sock_put(work->sk);
1789 kfree(work);
1790}
1791
1792static void accept_physical_worker(struct work_struct *w)
1793{
1794 struct amp_work_accept_physical *work =
1795 (struct amp_work_accept_physical *) w;
1796
1797 accept_physical(work->conn, work->id, work->sk);
1798 sock_put(work->sk);
1799 kfree(work);
1800}
1801
1802/* L2CAP Fixed Channel interface */
1803
1804void amp_conn_ind(struct l2cap_conn *conn, struct sk_buff *skb)
1805{
1806 struct amp_work_conn_ind *work;
1807 BT_DBG("conn %p, skb %p", conn, skb);
1808 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1809 if (work) {
1810 INIT_WORK((struct work_struct *) work, conn_ind_worker);
1811 work->conn = conn;
1812 work->skb = skb;
1813 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1814 kfree(work);
1815 }
1816}
1817
1818/* L2CAP Physical Link interface */
1819
1820void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
1821{
1822 struct amp_work_create_physical *work;
1823 BT_DBG("conn %p", conn);
1824 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1825 if (work) {
1826 INIT_WORK((struct work_struct *) work, create_physical_worker);
1827 work->conn = conn;
1828 work->sk = sk;
1829 sock_hold(sk);
1830 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1831 sock_put(sk);
1832 kfree(work);
1833 }
1834 }
1835}
1836
1837void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
1838{
1839 struct amp_work_accept_physical *work;
1840 BT_DBG("conn %p", conn);
1841
1842 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1843 if (work) {
1844 INIT_WORK((struct work_struct *) work, accept_physical_worker);
1845 work->conn = conn;
1846 work->sk = sk;
1847 work->id = id;
1848 sock_hold(sk);
1849 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1850 sock_put(sk);
1851 kfree(work);
1852 }
1853 }
1854}
1855
1856/* HCI interface */
1857
1858static void amp_cmd_cmplt_worker(struct work_struct *w)
1859{
1860 struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
1861 struct hci_dev *hdev = work->hdev;
1862 u16 opcode = work->opcode;
1863 struct sk_buff *skb = work->skb;
1864 struct amp_ctx *ctx;
1865
1866 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
1867 if (ctx)
1868 execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
1869 kfree_skb(skb);
1870 kfree(w);
1871}
1872
1873static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
1874 struct sk_buff *skb)
1875{
1876 struct amp_work_cmd_cmplt *work;
1877 struct sk_buff *skbc;
1878 BT_DBG("hdev %p opcode 0x%x skb %p len %d",
1879 hdev, opcode, skb, skb->len);
1880 skbc = skb_clone(skb, GFP_ATOMIC);
1881 if (!skbc)
1882 return;
1883 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1884 if (work) {
1885 INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
1886 work->hdev = hdev;
1887 work->opcode = opcode;
1888 work->skb = skbc;
1889 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1890 kfree(work);
1891 }
1892}
1893
1894static void amp_cmd_status_worker(struct work_struct *w)
1895{
1896 struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
1897 struct hci_dev *hdev = work->hdev;
1898 u16 opcode = work->opcode;
1899 u8 status = work->status;
1900 struct amp_ctx *ctx;
1901
1902 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
1903 if (ctx)
1904 execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
1905 kfree(w);
1906}
1907
1908static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
1909{
1910 struct amp_work_cmd_status *work;
1911 BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
1912 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1913 if (work) {
1914 INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
1915 work->hdev = hdev;
1916 work->opcode = opcode;
1917 work->status = status;
1918 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1919 kfree(work);
1920 }
1921}
1922
/*
 * Workqueue body for deferred HCI events from AMP controllers.
 *
 * HCI_EV_AMP_STATUS_CHANGE is handled here directly: a successful
 * change updates hdev->amp_status and broadcasts an A2MP Change
 * Notify.  Every other event is routed to the context waiting on that
 * event code, if any.  The cloned skb is consumed in all cases.
 */
static void amp_event_worker(struct work_struct *w)
{
	struct amp_work_event *work = (struct amp_work_event *) w;
	struct hci_dev *hdev = work->hdev;
	u8 event = work->event;
	struct sk_buff *skb = work->skb;
	struct amp_ctx *ctx;

	if (event == HCI_EV_AMP_STATUS_CHANGE) {
		struct hci_ev_amp_status_change *ev;
		if (skb->len < sizeof(*ev))
			goto amp_event_finished;
		ev = (void *) skb->data;
		if (ev->status != 0)
			goto amp_event_finished;
		/* unchanged status - nothing to notify */
		if (ev->amp_status == hdev->amp_status)
			goto amp_event_finished;
		hdev->amp_status = ev->amp_status;
		send_a2mp_change_notify();
		goto amp_event_finished;
	}
	ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
	if (ctx)
		execute_ctx(ctx, AMP_HCI_EVENT, skb);

amp_event_finished:
	kfree_skb(skb);
	kfree(w);
}
1952
1953static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
1954{
1955 struct amp_work_event *work;
1956 struct sk_buff *skbc;
1957 BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
1958 skbc = skb_clone(skb, GFP_ATOMIC);
1959 if (!skbc)
1960 return;
1961 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1962 if (work) {
1963 INIT_WORK((struct work_struct *) work, amp_event_worker);
1964 work->hdev = hdev;
1965 work->event = event;
1966 work->skb = skbc;
1967 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1968 kfree(work);
1969 }
1970}
1971
/* Workqueue body: broadcast an AMP controller list change to peers. */
static void amp_dev_event_worker(struct work_struct *w)
{
	send_a2mp_change_notify();
	kfree(w);
}
1977
1978static int amp_dev_event(struct notifier_block *this, unsigned long event,
1979 void *ptr)
1980{
1981 struct hci_dev *hdev = (struct hci_dev *) ptr;
1982 struct amp_work_event *work;
1983
1984 if (hdev->amp_type == HCI_BREDR)
1985 return NOTIFY_DONE;
1986
1987 switch (event) {
1988 case HCI_DEV_UNREG:
1989 case HCI_DEV_REG:
1990 case HCI_DEV_UP:
1991 case HCI_DEV_DOWN:
1992 BT_DBG("hdev %p event %ld", hdev, event);
1993 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1994 if (work) {
1995 INIT_WORK((struct work_struct *) work,
1996 amp_dev_event_worker);
1997 if (queue_work(amp_workqueue,
1998 (struct work_struct *) work) == 0)
1999 kfree(work);
2000 }
2001 }
2002 return NOTIFY_DONE;
2003}
2004
2005
2006/* L2CAP module init continued */
2007
/* Receives HCI device register/unregister/up/down notifications. */
static struct notifier_block amp_notifier = {
	.notifier_call = amp_dev_event
};
2011
/* Callbacks registered with the HCI core for AMP-related events. */
static struct amp_mgr_cb hci_amp = {
	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
	.amp_cmd_status_event = amp_cmd_status_evt,
	.amp_event = amp_evt
};
2017
2018int amp_init(void)
2019{
2020 hci_register_amp(&hci_amp);
2021 hci_register_notifier(&amp_notifier);
2022 amp_next_handle = 1;
2023 amp_workqueue = create_singlethread_workqueue("a2mp");
2024 if (!amp_workqueue)
2025 return -EPERM;
2026 return 0;
2027}
2028
/*
 * Module teardown: detach from the HCI core first so no new work can
 * be queued, then drain and destroy the A2MP workqueue.
 */
void amp_exit(void)
{
	hci_unregister_amp(&hci_amp);
	hci_unregister_notifier(&amp_notifier);
	flush_workqueue(amp_workqueue);
	destroy_workqueue(amp_workqueue);
}