blob: af8ee260708fd48d6b4797c15897f961ad145228 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/kernel.h>
18
19#include <linux/skbuff.h>
20#include <linux/list.h>
21#include <linux/workqueue.h>
22#include <linux/timer.h>
23
24#include <linux/crypto.h>
25#include <linux/scatterlist.h>
26#include <linux/err.h>
27#include <crypto/hash.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/amp.h>
33
34static struct workqueue_struct *amp_workqueue;
35
36LIST_HEAD(amp_mgr_list);
37DEFINE_RWLOCK(amp_mgr_list_lock);
38
39static int send_a2mp(struct socket *sock, u8 *data, int len);
40
41static void ctx_timeout(unsigned long data);
42
43static void launch_ctx(struct amp_mgr *mgr);
44static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
45static int kill_ctx(struct amp_ctx *ctx);
46static int cancel_ctx(struct amp_ctx *ctx);
47
48static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
49
/* Unlink @mgr from the global manager list, kill every context still
 * attached to it, then free its controller record and the manager itself.
 */
static void remove_amp_mgr(struct amp_mgr *mgr)
{
        BT_DBG("mgr %p", mgr);

        write_lock(&amp_mgr_list_lock);
        list_del(&mgr->list);
        write_unlock(&amp_mgr_list_lock);

        /* kill_ctx() eventually takes ctx_list_lock for writing when the
         * context unlinks itself, so the read lock is dropped around each
         * kill and the list is re-read from the head every iteration.
         */
        read_lock(&mgr->ctx_list_lock);
        while (!list_empty(&mgr->ctx_list)) {
                struct amp_ctx *ctx;
                ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
                read_unlock(&mgr->ctx_list_lock);
                BT_DBG("kill ctx %p", ctx);
                kill_ctx(ctx);
                read_lock(&mgr->ctx_list_lock);
        }
        read_unlock(&mgr->ctx_list_lock);

        kfree(mgr->ctrls);

        kfree(mgr);
}
73
74static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
75{
76 struct amp_mgr *mgr;
77 struct amp_mgr *found = NULL;
78
Peter Krystadf5289202011-11-14 15:11:22 -080079 read_lock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080 list_for_each_entry(mgr, &amp_mgr_list, list) {
81 if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
82 found = mgr;
83 break;
84 }
85 }
Peter Krystadf5289202011-11-14 15:11:22 -080086 read_unlock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087 return found;
88}
89
/* Find the AMP manager bound to @conn, creating and registering one if
 * none exists.  @skb is stashed on a newly created manager.  Returns
 * NULL on allocation or fixed-channel-open failure.
 */
static struct amp_mgr *get_create_amp_mgr(struct l2cap_conn *conn,
                                                struct sk_buff *skb)
{
        struct amp_mgr *mgr;

        write_lock(&amp_mgr_list_lock);
        list_for_each_entry(mgr, &amp_mgr_list, list) {
                if (mgr->l2cap_conn == conn) {
                        BT_DBG("conn %p found %p", conn, mgr);
                        write_unlock(&amp_mgr_list_lock);
                        goto gc_finished;
                }
        }
        write_unlock(&amp_mgr_list_lock);

        /* NOTE(review): the lock is dropped between the lookup above and
         * the list_add below, so two concurrent callers could each create
         * a manager for the same conn - confirm callers serialize this.
         */
        mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
        if (!mgr)
                return NULL;

        mgr->l2cap_conn = conn;
        mgr->next_ident = 1;
        INIT_LIST_HEAD(&mgr->ctx_list);
        rwlock_init(&mgr->ctx_list_lock);
        mgr->skb = skb;
        BT_DBG("conn %p mgr %p", conn, mgr);
        mgr->a2mp_sock = open_fixed_channel(conn->src, conn->dst);
        if (!mgr->a2mp_sock) {
                kfree(mgr);
                return NULL;
        }
        write_lock(&amp_mgr_list_lock);
        list_add(&(mgr->list), &amp_mgr_list);
        write_unlock(&amp_mgr_list_lock);

gc_finished:
        return mgr;
}
127
128static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
129{
130 if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
131 return mgr->ctrls;
132 else
133 return NULL;
134}
135
136static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
137{
138 struct amp_ctrl *ctrl;
139
140 BT_DBG("mgr %p, id %d", mgr, id);
141 if ((mgr->ctrls) && (mgr->ctrls->id == id))
142 ctrl = mgr->ctrls;
143 else {
144 kfree(mgr->ctrls);
145 ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
146 if (ctrl) {
147 ctrl->mgr = mgr;
148 ctrl->id = id;
149 }
150 mgr->ctrls = ctrl;
151 }
152
153 return ctrl;
154}
155
156static struct amp_ctx *create_ctx(u8 type, u8 state)
157{
158 struct amp_ctx *ctx = NULL;
159
160 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
161 if (ctx) {
162 ctx->type = type;
163 ctx->state = state;
164 init_timer(&(ctx->timer));
165 ctx->timer.function = ctx_timeout;
166 ctx->timer.data = (unsigned long) ctx;
167 }
168 BT_DBG("ctx %p, type %d", ctx, type);
169 return ctx;
170}
171
/* Publish @ctx on @mgr's context list and kick its state machine with
 * an AMP_INIT event.
 */
static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
{
        BT_DBG("ctx %p", ctx);
        write_lock(&mgr->ctx_list_lock);
        list_add(&ctx->list, &mgr->ctx_list);
        write_unlock(&mgr->ctx_list_lock);
        ctx->mgr = mgr;
        execute_ctx(ctx, AMP_INIT, 0);
}
181
/* Tear down @ctx: stop its timer, unlink it from its manager, start any
 * context that was deferred behind it, and free it.
 */
static void destroy_ctx(struct amp_ctx *ctx)
{
        struct amp_mgr *mgr = ctx->mgr;

        BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
        del_timer(&ctx->timer);
        write_lock(&mgr->ctx_list_lock);
        list_del(&ctx->list);
        write_unlock(&mgr->ctx_list_lock);
        /* a deferred context was waiting for this one to finish */
        if (ctx->deferred)
                execute_ctx(ctx->deferred, AMP_INIT, 0);
        kfree(ctx);
}
195
196static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
197{
198 struct amp_ctx *fnd = NULL;
199 struct amp_ctx *ctx;
200
Peter Krystadf5289202011-11-14 15:11:22 -0800201 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 list_for_each_entry(ctx, &mgr->ctx_list, list) {
203 if (ctx->type == type) {
204 fnd = ctx;
205 break;
206 }
207 }
Peter Krystadf5289202011-11-14 15:11:22 -0800208 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209 return fnd;
210}
211
212static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
213{
214 struct amp_mgr *mgr = cur->mgr;
215 struct amp_ctx *fnd = NULL;
216 struct amp_ctx *ctx;
217
Peter Krystadf5289202011-11-14 15:11:22 -0800218 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219 list_for_each_entry(ctx, &mgr->ctx_list, list) {
220 if ((ctx->type == type) && (ctx != cur)) {
221 fnd = ctx;
222 break;
223 }
224 }
Peter Krystadf5289202011-11-14 15:11:22 -0800225 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 return fnd;
227}
228
229static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
230{
231 struct amp_ctx *fnd = NULL;
232 struct amp_ctx *ctx;
233
Peter Krystadf5289202011-11-14 15:11:22 -0800234 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235 list_for_each_entry(ctx, &mgr->ctx_list, list) {
236 if ((ctx->evt_type & AMP_A2MP_RSP) &&
237 (ctx->rsp_ident == ident)) {
238 fnd = ctx;
239 break;
240 }
241 }
Peter Krystadf5289202011-11-14 15:11:22 -0800242 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700243 return fnd;
244}
245
/* Search every manager's context list for a context bound to @hdev that
 * is waiting for @evt_type with @evt_value (an HCI opcode for command
 * status/complete, or an event code for plain HCI events).
 */
static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
                                        u16 evt_value)
{
        struct amp_mgr *mgr;
        struct amp_ctx *fnd = NULL;

        read_lock(&amp_mgr_list_lock);
        list_for_each_entry(mgr, &amp_mgr_list, list) {
                struct amp_ctx *ctx;
                read_lock(&mgr->ctx_list_lock);
                list_for_each_entry(ctx, &mgr->ctx_list, list) {
                        struct hci_dev *ctx_hdev;
                        /* take a temporary ref to the ctx's device just
                         * to compare it against @hdev */
                        ctx_hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
                        if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
                                switch (evt_type) {
                                case AMP_HCI_CMD_STATUS:
                                case AMP_HCI_CMD_CMPLT:
                                        /* match on the pending opcode */
                                        if (ctx->opcode == evt_value)
                                                fnd = ctx;
                                        break;
                                case AMP_HCI_EVENT:
                                        /* match on the expected event code */
                                        if (ctx->evt_code == (u8) evt_value)
                                                fnd = ctx;
                                        break;
                                }
                        }
                        if (ctx_hdev)
                                hci_dev_put(ctx_hdev);

                        if (fnd)
                                break;
                }
                read_unlock(&mgr->ctx_list_lock);
        }
        read_unlock(&amp_mgr_list_lock);
        return fnd;
}
283
284static inline u8 next_ident(struct amp_mgr *mgr)
285{
286 if (++mgr->next_ident == 0)
287 mgr->next_ident = 1;
288 return mgr->next_ident;
289}
290
291static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
292 u16 len, void *data, u16 len2, void *data2)
293{
294 struct a2mp_cmd_hdr *hdr;
295 int plen;
296 u8 *p, *cmd;
297
298 BT_DBG("ident %d code 0x%02x", ident, code);
299 if (!mgr->a2mp_sock)
300 return;
301 plen = sizeof(*hdr) + len + len2;
302 cmd = kzalloc(plen, GFP_ATOMIC);
303 if (!cmd)
304 return;
305 hdr = (struct a2mp_cmd_hdr *) cmd;
306 hdr->code = code;
307 hdr->ident = ident;
308 hdr->len = cpu_to_le16(len+len2);
309 p = cmd + sizeof(*hdr);
310 memcpy(p, data, len);
311 p += len;
312 memcpy(p, data2, len2);
313 send_a2mp(mgr->a2mp_sock, cmd, plen);
314 kfree(cmd);
315}
316
/* Convenience wrapper: send an A2MP command with a single payload. */
static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
                                        u8 code, u16 len, void *data)
{
        send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
}
322
/* Handle an incoming A2MP Command Reject: kill the context that is
 * still waiting on the rejected request's identifier.  Returns 0, or
 * -EINVAL for a truncated packet.
 */
static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
{
        struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
        struct a2mp_cmd_rej *rej;
        struct amp_ctx *ctx;

        BT_DBG("ident %d code %d", hdr->ident, hdr->code);
        rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
        if (skb->len < sizeof(*rej))
                return -EINVAL;
        BT_DBG("reason %d", le16_to_cpu(rej->reason));
        /* the reject's ident names the request a context is waiting on */
        ctx = get_ctx_a2mp(mgr, hdr->ident);
        if (ctx)
                kill_ctx(ctx);
        skb_pull(skb, sizeof(*rej));
        return 0;
}
340
341static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
342 void *msg)
343{
344 struct a2mp_cl clist[16];
345 struct a2mp_cl *cl;
346 struct hci_dev *hdev;
347 int num_ctrls = 1, id;
348
349 cl = clist;
350 cl->id = 0;
351 cl->type = 0;
352 cl->status = 1;
353
354 for (id = 0; id < 16; ++id) {
355 hdev = hci_dev_get(id);
356 if (hdev) {
357 if ((hdev->amp_type != HCI_BREDR) &&
358 test_bit(HCI_UP, &hdev->flags)) {
359 (cl + num_ctrls)->id = HCI_A2MP_ID(hdev->id);
360 (cl + num_ctrls)->type = hdev->amp_type;
361 (cl + num_ctrls)->status = hdev->amp_status;
362 ++num_ctrls;
363 }
364 hci_dev_put(hdev);
365 }
366 }
367 send_a2mp_cmd2(mgr, ident, code, len, msg,
368 num_ctrls*sizeof(*cl), clist);
369
370 return 0;
371}
372
373static void send_a2mp_change_notify(void)
374{
375 struct amp_mgr *mgr;
376
Peter Krystadf5289202011-11-14 15:11:22 -0800377 read_lock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378 list_for_each_entry(mgr, &amp_mgr_list, list) {
379 if (mgr->discovered)
380 send_a2mp_cl(mgr, next_ident(mgr),
381 A2MP_CHANGE_NOTIFY, 0, NULL);
382 }
Peter Krystadf5289202011-11-14 15:11:22 -0800383 read_unlock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384}
385
/* Handle an A2MP Discover Request: consume any extended-feature words,
 * mark the manager as discovered, and answer with our controller list.
 * Returns 0, or -EINVAL for a truncated packet.
 */
static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
        struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
        struct a2mp_discover_req *req;
        u16 *efm;
        struct a2mp_discover_rsp rsp;

        req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
        if (skb->len < sizeof(*req))
                return -EINVAL;
        efm = (u16 *) skb_pull(skb, sizeof(*req));

        BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
                le16_to_cpu(req->ext_feat));

        /* bit 15 set in an extended-feature word announces another word */
        while (le16_to_cpu(req->ext_feat) & 0x8000) {
                if (skb->len < sizeof(*efm))
                        return -EINVAL;
                req->ext_feat = *efm;
                BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
                efm = (u16 *) skb_pull(skb, sizeof(*efm));
        }

        rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
        rsp.ext_feat = 0;

        mgr->discovered = 1;

        return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
                                sizeof(rsp), &rsp);
}
417
418static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
419{
420 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
421 struct a2mp_cl *cl;
422
423 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
424 while (skb->len >= sizeof(*cl)) {
425 struct amp_ctrl *ctrl;
426 if (cl->id != 0) {
427 ctrl = get_create_ctrl(mgr, cl->id);
428 if (ctrl != NULL) {
429 ctrl->type = cl->type;
430 ctrl->status = cl->status;
431 }
432 }
433 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
434 }
435
436 /* TODO find controllers in manager that were not on received */
437 /* controller list and destroy them */
438 send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);
439
440 return 0;
441}
442
/* Handle an A2MP Get Info Request for controller @id and respond with
 * its bandwidth/latency/capability parameters.  status 1 in the
 * response means the id named no usable AMP controller.
 */
static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
        struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
        u8 *data;
        int id;
        struct hci_dev *hdev;
        struct a2mp_getinfo_rsp rsp;

        data = (u8 *) skb_pull(skb, sizeof(*hdr));
        if (le16_to_cpu(hdr->len) < sizeof(*data))
                return -EINVAL;
        if (skb->len < sizeof(*data))
                return -EINVAL;
        id = *data;
        skb_pull(skb, sizeof(*data));
        rsp.id = id;
        rsp.status = 1;         /* invalid controller until proven otherwise */

        BT_DBG("id %d", id);
        hdev = hci_dev_get(A2MP_HCI_ID(id));

        if (hdev && hdev->amp_type != HCI_BREDR) {
                rsp.status = 0;
                rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
                rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
                rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
                rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
                rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
        }

        send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);

        if (hdev)
                hci_dev_put(hdev);

        return 0;
}
480
481static void create_physical(struct l2cap_conn *conn, struct sock *sk)
482{
483 struct amp_mgr *mgr;
484 struct amp_ctx *ctx = NULL;
485
486 BT_DBG("conn %p", conn);
487 mgr = get_create_amp_mgr(conn, NULL);
488 if (!mgr)
489 goto cp_finished;
490 BT_DBG("mgr %p", mgr);
491 ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
492 if (!ctx)
493 goto cp_finished;
494 ctx->sk = sk;
495 sock_hold(sk);
496 start_ctx(mgr, ctx);
497 return;
498
499cp_finished:
500 l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
501}
502
503static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
504{
505 struct amp_mgr *mgr;
506 struct hci_dev *hdev;
507 struct hci_conn *conn;
508 struct amp_ctx *aplctx = NULL;
509 u8 remote_id = 0;
510 int result = -EINVAL;
511
512 BT_DBG("lcon %p", lcon);
513 mgr = get_create_amp_mgr(lcon, NULL);
514 if (!mgr)
515 goto ap_finished;
516 BT_DBG("mgr %p", mgr);
517 hdev = hci_dev_get(A2MP_HCI_ID(id));
518 if (!hdev)
519 goto ap_finished;
520 BT_DBG("hdev %p", hdev);
521 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
522 &mgr->l2cap_conn->hcon->dst);
523 if (conn) {
524 BT_DBG("conn %p", hdev);
525 result = 0;
526 remote_id = conn->dst_id;
527 goto ap_finished;
528 }
529 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
530 if (!aplctx)
531 goto ap_finished;
532 aplctx->sk = sk;
533 sock_hold(sk);
534 return;
535
536ap_finished:
537 l2cap_amp_physical_complete(result, id, remote_id, sk);
538}
539
540static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
541{
542 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
543 struct amp_ctx *ctx;
544 struct a2mp_getampassoc_req *req;
545
546 if (hdr->len < sizeof(*req))
547 return -EINVAL;
548 req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
549 skb_pull(skb, sizeof(*req));
550
551 ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
552 if (!ctx)
553 return -ENOMEM;
554 ctx->id = req->id;
555 ctx->d.gaa.req_ident = hdr->ident;
556 ctx->hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
557 if (ctx->hdev)
558 ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
559 GFP_ATOMIC);
560 start_ctx(mgr, ctx);
561 return 0;
562}
563
/* State machine answering a Get AMP Assoc request: reads the local AMP
 * assoc from the controller in fragments via Read Local AMP Assoc, then
 * sends the A2MP response (status 1 on any failure).  Returns 1 when
 * the context is finished, 0 while more events are expected.
 */
static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
        struct sk_buff *skb = (struct sk_buff *) data;
        struct hci_cp_read_local_amp_assoc cp;
        struct hci_rp_read_local_amp_assoc *rp;
        struct a2mp_getampassoc_rsp rsp;
        u16 rem_len;
        u16 frag_len;

        rsp.status = 1;         /* assume failure until the read completes */
        if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
                goto gaa_finished;

        switch (ctx->state) {
        case AMP_GAA_INIT:
                /* issue the first Read Local AMP Assoc command */
                ctx->state = AMP_GAA_RLAA_COMPLETE;
                ctx->evt_type = AMP_HCI_CMD_CMPLT;
                ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
                ctx->d.gaa.len_so_far = 0;
                cp.phy_handle = 0;
                cp.len_so_far = 0;
                cp.max_len = ctx->hdev->amp_assoc_size;
                hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
                break;

        case AMP_GAA_RLAA_COMPLETE:
                if (skb->len < 4)
                        goto gaa_finished;
                rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
                if (rp->status)
                        goto gaa_finished;
                rem_len = le16_to_cpu(rp->rem_len);
                skb_pull(skb, 4);       /* skip the fixed response header */
                frag_len = skb->len;

                if (ctx->d.gaa.len_so_far + rem_len <=
                                ctx->hdev->amp_assoc_size) {
                        struct hci_cp_read_local_amp_assoc cp;
                        u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
                        memcpy(assoc, rp->frag, frag_len);
                        ctx->d.gaa.len_so_far += rem_len;
                        rem_len -= frag_len;
                        if (rem_len == 0) {
                                /* entire assoc read; report success */
                                rsp.status = 0;
                                goto gaa_finished;
                        }
                        /* more assoc data to read */
                        cp.phy_handle = 0;
                        cp.len_so_far = ctx->d.gaa.len_so_far;
                        cp.max_len = ctx->hdev->amp_assoc_size;
                        hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
                }
                break;

        default:
                goto gaa_finished;
                break;
        }
        return 0;

gaa_finished:
        rsp.id = ctx->id;
        send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
                        sizeof(rsp), &rsp,
                        ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
        kfree(ctx->d.gaa.assoc);
        if (ctx->hdev)
                hci_dev_put(ctx->hdev);
        return 1;
}
634
/* Completion context shared between hmac_sha256() and its async
 * callback hmac_sha256_final().
 */
struct hmac_sha256_result {
        struct completion completion;   /* signalled when the hash is done */
        int err;                        /* final status from the crypto layer */
};
639
640static void hmac_sha256_final(struct crypto_async_request *req, int err)
641{
642 struct hmac_sha256_result *r = req->data;
643 if (err == -EINPROGRESS)
644 return;
645 r->err = err;
646 complete(&r->completion);
647}
648
649int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
650 u8 *output, u8 outlen)
651{
652 int ret = 0;
653 struct crypto_ahash *tfm;
654 struct scatterlist sg;
655 struct ahash_request *req;
656 struct hmac_sha256_result tresult;
657 void *hash_buff = NULL;
658
659 unsigned char hash_result[64];
660 int i;
661
662 memset(output, 0, outlen);
663
664 init_completion(&tresult.completion);
665
666 tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
667 CRYPTO_ALG_TYPE_AHASH_MASK);
668 if (IS_ERR(tfm)) {
669 BT_DBG("crypto_alloc_ahash failed");
670 ret = PTR_ERR(tfm);
671 goto err_tfm;
672 }
673
674 req = ahash_request_alloc(tfm, GFP_KERNEL);
675 if (!req) {
676 BT_DBG("failed to allocate request for hmac(sha256)");
677 ret = -ENOMEM;
678 goto err_req;
679 }
680
681 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
682 hmac_sha256_final, &tresult);
683
684 hash_buff = kzalloc(psize, GFP_KERNEL);
685 if (!hash_buff) {
686 BT_DBG("failed to kzalloc hash_buff");
687 ret = -ENOMEM;
688 goto err_hash_buf;
689 }
690
691 memset(hash_result, 0, 64);
692 memcpy(hash_buff, plaintext, psize);
693 sg_init_one(&sg, hash_buff, psize);
694
695 if (ksize) {
696 crypto_ahash_clear_flags(tfm, ~0);
697 ret = crypto_ahash_setkey(tfm, key, ksize);
698
699 if (ret) {
700 BT_DBG("crypto_ahash_setkey failed");
701 goto err_setkey;
702 }
703 }
704
705 ahash_request_set_crypt(req, &sg, hash_result, psize);
706 ret = crypto_ahash_digest(req);
707
708 BT_DBG("ret 0x%x", ret);
709
710 switch (ret) {
711 case 0:
712 for (i = 0; i < outlen; i++)
713 output[i] = hash_result[i];
714 break;
715 case -EINPROGRESS:
716 case -EBUSY:
717 ret = wait_for_completion_interruptible(&tresult.completion);
718 if (!ret && !tresult.err) {
719 INIT_COMPLETION(tresult.completion);
720 break;
721 } else {
722 BT_DBG("wait_for_completion_interruptible failed");
723 if (!ret)
724 ret = tresult.err;
725 goto out;
726 }
727 default:
728 goto out;
729 }
730
731out:
732err_setkey:
733 kfree(hash_buff);
734err_hash_buf:
735 ahash_request_free(req);
736err_req:
737 crypto_free_ahash(tfm);
738err_tfm:
739 return ret;
740}
741
742static void show_key(u8 *k)
743{
744 int i = 0;
745 for (i = 0; i < 32; i += 8)
746 BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
747 *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
748 *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
749}
750
/* Derive the AMP link key for @conn into @data (32 bytes), setting
 * @len and @type from the link.  The generic AMP key is
 * HMAC-SHA256(doubled link key, "gamp"); for key types above 3 a
 * further 802.11-specific key is derived from it with "802b".
 * Returns 0 on success, -EACCES for unauthenticated links.
 */
static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
        u8 bt2_key[32];
        u8 gamp_key[32];
        u8 b802_key[32];
        int result;

        if (!hci_conn_check_link_mode(conn))
                return -EACCES;

        BT_DBG("key_type %d", conn->key_type);
        /* only link key types >= 3 are acceptable for AMP */
        if (conn->key_type < 3)
                return -EACCES;

        *type = conn->key_type;
        *len = 32;
        /* double the 16-byte BR/EDR link key to form the 32-byte base */
        memcpy(&bt2_key[0], conn->link_key, 16);
        memcpy(&bt2_key[16], conn->link_key, 16);
        result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
        if (result)
                goto ps_finished;

        if (conn->key_type == 3) {
                /* key type 3: the generic AMP key is used directly */
                BT_DBG("gamp_key");
                show_key(gamp_key);
                memcpy(data, gamp_key, 32);
                goto ps_finished;
        }

        result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
        if (result)
                goto ps_finished;

        BT_DBG("802b_key");
        show_key(b802_key);
        memcpy(data, b802_key, 32);

ps_finished:
        return result;
}
791
792static u8 amp_next_handle;
793static inline u8 physlink_handle(struct hci_dev *hdev)
794{
795 /* TODO amp_next_handle should be part of hci_dev */
796 if (amp_next_handle == 0)
797 amp_next_handle = 1;
798 return amp_next_handle++;
799}
800
801/* Start an Accept Physical Link sequence */
802static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
803{
804 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
805 struct amp_ctx *ctx = NULL;
806 struct a2mp_createphyslink_req *req;
807
808 if (hdr->len < sizeof(*req))
809 return -EINVAL;
810 req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
811 skb_pull(skb, sizeof(*req));
812 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
813
814 /* initialize the context */
815 ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
816 if (!ctx)
817 return -ENOMEM;
818 ctx->d.apl.req_ident = hdr->ident;
819 ctx->d.apl.remote_id = req->local_id;
820 ctx->id = req->remote_id;
821
822 /* add the supplied remote assoc to the context */
823 ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
824 if (ctx->d.apl.remote_assoc)
825 memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
826 ctx->d.apl.len_so_far = 0;
827 ctx->d.apl.rem_len = skb->len;
828 skb_pull(skb, skb->len);
829 ctx->hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
830 start_ctx(mgr, ctx);
831 return 0;
832}
833
/* State machine for accepting a physical link requested by the remote
 * side (after an A2MP Create Physical Link Request).  Driven with A2MP
 * and HCI events; sends the Accept Physical Link HCI command, streams
 * the remote assoc in <=248-byte fragments, then waits for the
 * physical link complete event.  Returns 1 when the context is
 * finished, 0 while more events are expected.
 */
static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
        struct sk_buff *skb = data;
        struct hci_cp_accept_phys_link acp;
        struct hci_cp_write_remote_amp_assoc wcp;
        struct hci_rp_write_remote_amp_assoc *wrp;
        struct hci_ev_cmd_status *cs = data;
        struct hci_ev_phys_link_complete *ev;
        struct a2mp_createphyslink_rsp rsp;
        struct amp_ctx *cplctx;
        struct amp_ctx *aplctx;
        u16 frag_len;
        struct hci_conn *conn;
        int result;

        BT_DBG("state %d", ctx->state);
        result = -EINVAL;
        rsp.status = 1; /* Invalid Controller ID */
        if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
                goto apl_finished;
        if (evt_type == AMP_KILLED) {
                result = -EAGAIN;
                rsp.status = 4; /* Disconnect request received */
                goto apl_finished;
        }
        if (!ctx->d.apl.remote_assoc) {
                result = -ENOMEM;
                rsp.status = 2; /* Unable to Start */
                goto apl_finished;
        }

        switch (ctx->state) {
        case AMP_APL_INIT:
                BT_DBG("local_id %d, remote_id %d",
                        ctx->id, ctx->d.apl.remote_id);
                conn = hci_conn_hash_lookup_id(ctx->hdev,
                        &ctx->mgr->l2cap_conn->hcon->dst,
                        ctx->d.apl.remote_id);
                if (conn) {
                        result = -EEXIST;
                        rsp.status = 5; /* Already Exists */
                        goto apl_finished;
                }

                /* defer behind any accept already running for this remote.
                 * NOTE(review): reads d.cpl.remote_id on an ACCEPTPHYSLINK
                 * context - presumably remote_id aliases across the ctx
                 * union; confirm against the amp_ctx definition. */
                aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
                if ((aplctx) &&
                        (aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
                        BT_DBG("deferred to %p", aplctx);
                        aplctx->deferred = ctx;
                        break;
                }

                /* collision with our own outgoing create-physical-link:
                 * the side with the lower bdaddr keeps its create */
                cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
                if ((cplctx) &&
                        (cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
                        struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
                        BT_DBG("local %s remote %s",
                                batostr(&bcon->hdev->bdaddr),
                                batostr(&bcon->dst));
                        if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
                                (bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
                                BT_DBG("COLLISION LOSER");
                                cplctx->deferred = ctx;
                                cancel_ctx(cplctx);
                                break;
                        } else {
                                BT_DBG("COLLISION WINNER");
                                result = -EISCONN;
                                rsp.status = 3; /* Collision */
                                goto apl_finished;
                        }
                }

                /* derive the AMP link key for the accept command */
                result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
                                                &acp.key_len, &acp.type);
                if (result) {
                        BT_DBG("SECURITY");
                        rsp.status = 6; /* Security Violation */
                        goto apl_finished;
                }

                ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
                ctx->state = AMP_APL_APL_STATUS;
                ctx->evt_type = AMP_HCI_CMD_STATUS;
                ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
                acp.phy_handle = ctx->d.apl.phy_handle;
                hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
                break;

        case AMP_APL_APL_STATUS:
                if (cs->status != 0)
                        goto apl_finished;
                /* PAL will accept link, send a2mp response */
                rsp.local_id = ctx->id;
                rsp.remote_id = ctx->d.apl.remote_id;
                rsp.status = 0;
                send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
                                A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);

                /* send the first assoc fragment */
                wcp.phy_handle = ctx->d.apl.phy_handle;
                wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
                wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
                frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
                memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
                ctx->state = AMP_APL_WRA_COMPLETE;
                ctx->evt_type = AMP_HCI_CMD_CMPLT;
                ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
                hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
                break;

        case AMP_APL_WRA_COMPLETE:
                /* received write remote amp assoc command complete event */
                wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
                if (wrp->status != 0)
                        goto apl_finished;
                if (wrp->phy_handle != ctx->d.apl.phy_handle)
                        goto apl_finished;
                /* update progress */
                frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
                ctx->d.apl.len_so_far += frag_len;
                ctx->d.apl.rem_len -= frag_len;
                if (ctx->d.apl.rem_len > 0) {
                        u8 *assoc;
                        /* another assoc fragment to send */
                        wcp.phy_handle = ctx->d.apl.phy_handle;
                        wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
                        wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
                        frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
                        assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
                        memcpy(wcp.frag, assoc, frag_len);
                        hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
                        break;
                }
                /* wait for physical link complete event */
                ctx->state = AMP_APL_PL_COMPLETE;
                ctx->evt_type = AMP_HCI_EVENT;
                ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
                break;

        case AMP_APL_PL_COMPLETE:
                /* physical link complete event received */
                if (skb->len < sizeof(*ev))
                        goto apl_finished;
                ev = (struct hci_ev_phys_link_complete *) skb->data;
                if (ev->phy_handle != ctx->d.apl.phy_handle)
                        break;
                if (ev->status != 0)
                        goto apl_finished;
                conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
                if (!conn)
                        goto apl_finished;
                result = 0;
                BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
                conn->dst_id = ctx->d.apl.remote_id;
                bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
                goto apl_finished;
                break;

        default:
                goto apl_finished;
                break;
        }
        return 0;

apl_finished:
        /* report the outcome to L2CAP and, for early failures, to the
         * remote via a Create Physical Link Response */
        if (ctx->sk)
                l2cap_amp_physical_complete(result, ctx->id,
                                                ctx->d.apl.remote_id, ctx->sk);
        if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
                rsp.local_id = ctx->id;
                rsp.remote_id = ctx->d.apl.remote_id;
                send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
                                A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
        }
        kfree(ctx->d.apl.remote_assoc);
        if (ctx->sk)
                sock_put(ctx->sk);
        if (ctx->hdev)
                hci_dev_put(ctx->hdev);
        return 1;
}
1016
1017static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
1018{
1019 struct hci_cp_disconn_phys_link dcp;
1020
1021 ctx->state = AMP_CPL_PL_CANCEL;
1022 ctx->evt_type = AMP_HCI_EVENT;
1023 ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
1024 dcp.phy_handle = ctx->d.cpl.phy_handle;
1025 dcp.reason = reason;
1026 hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
1027}
1028
/*
 * State machine for the initiator side of AMP physical link bring-up,
 * driven by execute_ctx().
 *
 * @ctx:      the AMP_CREATEPHYSLINK context holding per-state data
 * @evt_type: stimulus class (AMP_A2MP_RSP, AMP_HCI_CMD_STATUS,
 *            AMP_HCI_CMD_CMPLT, AMP_HCI_EVENT, AMP_CANCEL, AMP_KILLED)
 * @data:     the matching skb, or a struct hci_ev_cmd_status for
 *            command-status stimuli
 *
 * Flow: A2MP discover -> getinfo -> get AMP assoc ->
 * HCI Create Physical Link -> Write Remote AMP Assoc (fragmented) ->
 * Channel Selected event -> Read Local AMP Assoc (fragmented) ->
 * A2MP Create Physical Link req + HCI Physical Link Complete event.
 *
 * Returns 0 while more events are expected, 1 when the context is done
 * (success or failure); on completion l2cap_amp_physical_complete() is
 * called with @result and held sk/hdev references are dropped.
 */
static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct amp_ctrl *ctrl;
	struct sk_buff *skb = data;
	struct a2mp_cmd_hdr *hdr;
	struct hci_ev_cmd_status *cs = data;
	struct amp_ctx *cplctx;
	struct a2mp_discover_req dreq;
	struct a2mp_discover_rsp *drsp;
	u16 *efm;
	struct a2mp_getinfo_req greq;
	struct a2mp_getinfo_rsp *grsp;
	struct a2mp_cl *cl;
	struct a2mp_getampassoc_req areq;
	struct a2mp_getampassoc_rsp *arsp;
	struct hci_cp_create_phys_link cp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_channel_selected *cev;
	struct hci_cp_read_local_amp_assoc rcp;
	struct hci_rp_read_local_amp_assoc *rrp;
	struct a2mp_createphyslink_req creq;
	struct a2mp_createphyslink_rsp *crsp;
	struct hci_ev_phys_link_complete *pev;
	struct hci_ev_disconn_phys_link_complete *dev;
	u8 *assoc, *rassoc, *lassoc;
	u16 frag_len;
	u16 rem_len;
	int result = -EAGAIN;
	struct hci_conn *conn;

	BT_DBG("state %d", ctx->state);
	if (evt_type == AMP_KILLED)
		goto cpl_finished;

	if (evt_type == AMP_CANCEL) {
		/* Nothing has been committed on the controller before
		 * CPL_STATUS, and once PL_COMPLETE has consumed its HCI
		 * event there is nothing left to tear down - just finish.
		 * Otherwise disconnect the physical link first.
		 */
		if ((ctx->state < AMP_CPL_CPL_STATUS) ||
			((ctx->state == AMP_CPL_PL_COMPLETE) &&
			!(ctx->evt_type & AMP_HCI_EVENT)))
			goto cpl_finished;

		/* 0x16: connection terminated by local host */
		cancel_cpl_ctx(ctx, 0x16);
		return 0;
	}

	switch (ctx->state) {
	case AMP_CPL_INIT:
		/* Serialize: if another create-phys-link context is already
		 * running, queue behind it and let destroy_ctx() launch us.
		 */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if (cplctx) {
			BT_DBG("deferred to %p", cplctx);
			cplctx->deferred = ctx;
			break;
		}
		/* start with an A2MP discover exchange */
		ctx->state = AMP_CPL_DISC_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
		dreq.ext_feat = 0;
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
							sizeof(dreq), &dreq);
		break;

	case AMP_CPL_DISC_RSP:
		drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
		if (skb->len < (sizeof(*drsp))) {
			result = -EINVAL;
			goto cpl_finished;
		}

		efm = (u16 *) skb_pull(skb, sizeof(*drsp));
		BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
							le16_to_cpu(drsp->ext_feat));

		/* skip any extended-feature mask words; bit 15 set means
		 * another 16-bit mask word follows
		 */
		while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
			if (skb->len < sizeof(*efm)) {
				result = -EINVAL;
				goto cpl_finished;
			}
			drsp->ext_feat = *efm;
			BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
			efm = (u16 *) skb_pull(skb, sizeof(*efm));
		}
		cl = (struct a2mp_cl *) efm;

		/* find the first remote and local controller with the
		 * same type
		 */
		greq.id = 0;
		result = -ENODEV;
		while (skb->len >= sizeof(*cl)) {
			if ((cl->id != 0) && (greq.id == 0)) {
				struct hci_dev *hdev;
				hdev = hci_dev_get_type(cl->type);
				if (hdev) {
					struct hci_conn *conn;
					ctx->hdev = hdev;
					ctx->id = HCI_A2MP_ID(hdev->id);
					ctx->d.cpl.remote_id = cl->id;
					conn = hci_conn_hash_lookup_ba(hdev,
						ACL_LINK,
						&ctx->mgr->l2cap_conn->hcon->dst);
					if (conn) {
						/* physical link already up to
						 * this peer - treat as done
						 */
						BT_DBG("PL_COMPLETE exists %x",
							(int) conn->handle);
						result = 0;
					}
					ctrl = get_create_ctrl(ctx->mgr,
								cl->id);
					if (ctrl) {
						ctrl->type = cl->type;
						ctrl->status = cl->status;
					}
					greq.id = cl->id;
				}
			}
			cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
		}
		/* bail out if no usable controller pair, or if a physical
		 * link already exists (result was forced to 0 above)
		 */
		if ((!greq.id) || (!result))
			goto cpl_finished;
		ctx->state = AMP_CPL_GETINFO_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
							sizeof(greq), &greq);
		break;

	case AMP_CPL_GETINFO_RSP:
		/* NOTE(review): length check happens before the header is
		 * pulled, so skb->len here still includes sizeof(*hdr) -
		 * confirm the bound is intended to be conservative
		 */
		if (skb->len < sizeof(*grsp))
			goto cpl_finished;
		grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
		if (grsp->status)
			goto cpl_finished;
		if (grsp->id != ctx->d.cpl.remote_id)
			goto cpl_finished;
		ctrl = get_ctrl(ctx->mgr, grsp->id);
		if (!ctrl)
			goto cpl_finished;
		/* cache the remote controller capabilities */
		ctrl->status = grsp->status;
		ctrl->total_bw = le32_to_cpu(grsp->total_bw);
		ctrl->max_bw = le32_to_cpu(grsp->max_bw);
		ctrl->min_latency = le32_to_cpu(grsp->min_latency);
		ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
		ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
		skb_pull(skb, sizeof(*grsp));

		ctx->d.cpl.max_len = ctrl->max_assoc_size;

		/* setup up GAA request */
		areq.id = ctx->d.cpl.remote_id;

		/* advance context state */
		ctx->state = AMP_CPL_GAA_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
							sizeof(areq), &areq);
		break;

	case AMP_CPL_GAA_RSP:
		if (skb->len < sizeof(*arsp))
			goto cpl_finished;
		hdr = (void *) skb->data;
		arsp = (void *) skb_pull(skb, sizeof(*hdr));
		if (arsp->id != ctx->d.cpl.remote_id)
			goto cpl_finished;
		if (arsp->status != 0)
			goto cpl_finished;

		/* store away remote assoc */
		assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
		ctx->d.cpl.len_so_far = 0;
		/* NOTE(review): hdr->len is __le16 on the wire (other
		 * handlers use le16_to_cpu(hdr->len)) - confirm this is
		 * correct on big-endian hosts
		 */
		ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
		rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
		if (!rassoc)
			goto cpl_finished;
		memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
		ctx->d.cpl.remote_assoc = rassoc;
		skb_pull(skb, ctx->d.cpl.rem_len);

		/* set up CPL command */
		ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
		cp.phy_handle = ctx->d.cpl.phy_handle;
		if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
						&cp.key_len, &cp.type)) {
			result = -EPERM;
			goto cpl_finished;
		}

		/* advance context state */
		ctx->state = AMP_CPL_CPL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		break;

	case AMP_CPL_CPL_STATUS:
		/* received create physical link command status */
		if (cs->status != 0)
			goto cpl_finished;
		/* send the first assoc fragment */
		wcp.phy_handle = ctx->d.cpl.phy_handle;
		/* NOTE(review): len_so_far is 0 here so byte order does not
		 * matter yet, but the retransmit path below wraps it in
		 * cpu_to_le16() - confirm the asymmetry is benign
		 */
		wcp.len_so_far = ctx->d.cpl.len_so_far;
		wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
		/* 248 = largest assoc fragment that fits an HCI command */
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
		ctx->state = AMP_CPL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = sizeof fixed wcp header preceding the fragment */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_CPL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		if (skb->len < sizeof(*wrp))
			goto cpl_finished;
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto cpl_finished;
		if (wrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;

		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		ctx->d.cpl.len_so_far += frag_len;
		ctx->d.cpl.rem_len -= frag_len;
		if (ctx->d.cpl.rem_len > 0) {
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.cpl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
			memcpy(wcp.frag,
				ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
				frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* now wait for channel selected event */
		ctx->state = AMP_CPL_CHANNEL_SELECT;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
		break;

	case AMP_CPL_CHANNEL_SELECT:
		/* received channel selection event */
		if (skb->len < sizeof(*cev))
			goto cpl_finished;
		cev = (void *) skb->data;
/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
	Create Physical Link collision scenario
		if (cev->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
*/

		/* request the first local assoc fragment */
		rcp.phy_handle = ctx->d.cpl.phy_handle;
		rcp.len_so_far = 0;
		rcp.max_len = ctx->d.cpl.max_len;
		lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
		if (!lassoc)
			goto cpl_finished;
		ctx->d.cpl.local_assoc = lassoc;
		ctx->d.cpl.len_so_far = 0;
		ctx->state = AMP_CPL_RLA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		break;

	case AMP_CPL_RLA_COMPLETE:
		/* received read local amp assoc command complete event;
		 * 4 = status + phy_handle + rem_len reply header bytes
		 */
		if (skb->len < 4)
			goto cpl_finished;
		rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
		if (rrp->status)
			goto cpl_finished;
		if (rrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
		rem_len = le16_to_cpu(rrp->rem_len);
		skb_pull(skb, 4);
		frag_len = skb->len;

		/* guard against overflowing the local_assoc buffer */
		if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
			goto cpl_finished;

		/* save this fragment in context */
		lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
		memcpy(lassoc, rrp->frag, frag_len);
		ctx->d.cpl.len_so_far += frag_len;
		rem_len -= frag_len;
		if (rem_len > 0) {
			/* request another local assoc fragment */
			rcp.phy_handle = ctx->d.cpl.phy_handle;
			rcp.len_so_far = ctx->d.cpl.len_so_far;
			rcp.max_len = ctx->d.cpl.max_len;
			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		} else {
			creq.local_id = ctx->id;
			creq.remote_id = ctx->d.cpl.remote_id;
			/* wait for A2MP rsp AND phys link complete event */
			ctx->state = AMP_CPL_PL_COMPLETE;
			ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
			ctx->rsp_ident = next_ident(ctx->mgr);
			ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
			send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
				A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
				ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
		}
		break;

	case AMP_CPL_PL_COMPLETE:
		/* both stimuli may arrive in either order; evt_type bits are
		 * cleared as each one is consumed
		 */
		if (evt_type == AMP_A2MP_RSP) {
			/* create physical link response received */
			ctx->evt_type &= ~AMP_A2MP_RSP;
			if (skb->len < sizeof(*crsp))
				goto cpl_finished;
			crsp = (void *) skb_pull(skb, sizeof(*hdr));
			if ((crsp->local_id != ctx->d.cpl.remote_id) ||
				(crsp->remote_id != ctx->id) ||
				(crsp->status != 0)) {
				/* 0x13: remote user terminated connection */
				cancel_cpl_ctx(ctx, 0x13);
				break;
			}

			/* notify Qualcomm PAL */
			if (ctx->hdev->manufacturer == 0x001d)
				hci_send_cmd(ctx->hdev,
					hci_opcode_pack(0x3f, 0x00), 0, NULL);
		}
		if (evt_type == AMP_HCI_EVENT) {
			ctx->evt_type &= ~AMP_HCI_EVENT;
			/* physical link complete event received */
			if (skb->len < sizeof(*pev))
				goto cpl_finished;
			pev = (void *) skb->data;
			if (pev->phy_handle != ctx->d.cpl.phy_handle)
				break;
			if (pev->status != 0)
				goto cpl_finished;
		}
		/* keep waiting until both stimuli have been consumed */
		if (ctx->evt_type)
			break;
		conn = hci_conn_hash_lookup_handle(ctx->hdev,
							ctx->d.cpl.phy_handle);
		if (!conn)
			goto cpl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		conn->dst_id = ctx->d.cpl.remote_id;
		conn->out = 1;
		goto cpl_finished;
		break;

	case AMP_CPL_PL_CANCEL:
		/* disconnect (from cancel_cpl_ctx) has completed */
		dev = (void *) skb->data;
		BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
		result = -EISCONN;
		goto cpl_finished;
		break;

	default:
		goto cpl_finished;
		break;
	}
	return 0;

cpl_finished:
	l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
					ctx->sk);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	kfree(ctx->d.cpl.remote_assoc);
	kfree(ctx->d.cpl.local_assoc);
	return 1;
}
1407
1408static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
1409{
1410 struct a2mp_cmd_hdr *hdr = (void *) skb->data;
1411 struct a2mp_disconnphyslink_req *req;
1412 struct a2mp_disconnphyslink_rsp rsp;
1413 struct hci_dev *hdev;
1414 struct hci_conn *conn;
1415 struct amp_ctx *aplctx;
1416
1417 BT_DBG("mgr %p skb %p", mgr, skb);
1418 if (hdr->len < sizeof(*req))
1419 return -EINVAL;
1420 req = (void *) skb_pull(skb, sizeof(*hdr));
1421 skb_pull(skb, sizeof(*req));
1422
1423 rsp.local_id = req->remote_id;
1424 rsp.remote_id = req->local_id;
1425 rsp.status = 0;
1426 BT_DBG("local_id %d remote_id %d",
1427 (int) rsp.local_id, (int) rsp.remote_id);
1428 hdev = hci_dev_get(A2MP_HCI_ID(rsp.local_id));
1429 if (!hdev) {
1430 rsp.status = 1; /* Invalid Controller ID */
1431 goto dpl_finished;
1432 }
1433 BT_DBG("hdev %p", hdev);
1434 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1435 &mgr->l2cap_conn->hcon->dst);
1436 if (!conn) {
1437 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
1438 if (aplctx) {
1439 kill_ctx(aplctx);
1440 rsp.status = 0;
1441 goto dpl_finished;
1442 }
1443 rsp.status = 2; /* No Physical Link exists */
1444 goto dpl_finished;
1445 }
1446 BT_DBG("conn %p", conn);
1447 hci_disconnect(conn, 0x13);
1448
1449dpl_finished:
1450 send_a2mp_cmd(mgr, hdr->ident,
1451 A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
1452 if (hdev)
1453 hci_dev_put(hdev);
1454 return 0;
1455}
1456
1457static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
1458{
1459 struct amp_mgr *mgr = ctx->mgr;
1460 u8 finished = 0;
1461
1462 if (!mgr->connected)
1463 return 0;
1464
1465 switch (ctx->type) {
1466 case AMP_GETAMPASSOC:
1467 finished = getampassoc_handler(ctx, evt_type, data);
1468 break;
1469 case AMP_CREATEPHYSLINK:
1470 finished = createphyslink_handler(ctx, evt_type, data);
1471 break;
1472 case AMP_ACCEPTPHYSLINK:
1473 finished = acceptphyslink_handler(ctx, evt_type, data);
1474 break;
1475 }
1476
1477 if (!finished)
1478 mod_timer(&(ctx->timer), jiffies +
1479 msecs_to_jiffies(A2MP_RSP_TIMEOUT));
1480 else
1481 destroy_ctx(ctx);
1482 return finished;
1483}
1484
1485static int cancel_ctx(struct amp_ctx *ctx)
1486{
1487 return execute_ctx(ctx, AMP_CANCEL, 0);
1488}
1489
1490static int kill_ctx(struct amp_ctx *ctx)
1491{
1492 return execute_ctx(ctx, AMP_KILLED, 0);
1493}
1494
1495static void ctx_timeout_worker(struct work_struct *w)
1496{
1497 struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
1498 struct amp_ctx *ctx = work->ctx;
1499 kill_ctx(ctx);
1500 kfree(work);
1501}
1502
1503static void ctx_timeout(unsigned long data)
1504{
1505 struct amp_ctx *ctx = (struct amp_ctx *) data;
1506 struct amp_work_ctx_timeout *work;
1507
1508 BT_DBG("ctx %p", ctx);
1509 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1510 if (work) {
1511 INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
1512 work->ctx = ctx;
1513 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1514 kfree(work);
1515 }
1516}
1517
1518static void launch_ctx(struct amp_mgr *mgr)
1519{
1520 struct amp_ctx *ctx = NULL;
1521
1522 BT_DBG("mgr %p", mgr);
Peter Krystadf5289202011-11-14 15:11:22 -08001523 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001524 if (!list_empty(&mgr->ctx_list))
1525 ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
Peter Krystadf5289202011-11-14 15:11:22 -08001526 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001527 BT_DBG("ctx %p", ctx);
1528 if (ctx)
1529 execute_ctx(ctx, AMP_INIT, NULL);
1530}
1531
1532static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
1533{
1534 struct amp_ctx *ctx;
1535 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1536 u16 hdr_len = le16_to_cpu(hdr->len);
1537
1538 /* find context waiting for A2MP rsp with this rsp's identifier */
1539 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
1540 ctx = get_ctx_a2mp(mgr, hdr->ident);
1541 if (ctx) {
1542 execute_ctx(ctx, AMP_A2MP_RSP, skb);
1543 } else {
1544 BT_DBG("context not found");
1545 skb_pull(skb, sizeof(*hdr));
1546 if (hdr_len > skb->len)
1547 hdr_len = skb->len;
1548 skb_pull(skb, hdr_len);
1549 }
1550 return 0;
1551}
1552
1553/* L2CAP-A2MP interface */
1554
Peter Krystadf5289202011-11-14 15:11:22 -08001555static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001556{
1557 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1558 int len;
1559 int err = 0;
1560 struct amp_mgr *mgr;
1561
1562 mgr = get_amp_mgr_sk(sk);
1563 if (!mgr)
1564 goto a2mp_finished;
1565
1566 len = skb->len;
1567 while (len >= sizeof(*hdr)) {
1568 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1569 u16 clen = le16_to_cpu(hdr->len);
1570
1571 BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
1572 if (clen > len || !hdr->ident) {
1573 err = -EINVAL;
1574 break;
1575 }
1576 switch (hdr->code) {
1577 case A2MP_COMMAND_REJ:
1578 command_rej(mgr, skb);
1579 break;
1580 case A2MP_DISCOVER_REQ:
1581 err = discover_req(mgr, skb);
1582 break;
1583 case A2MP_CHANGE_NOTIFY:
1584 err = change_notify(mgr, skb);
1585 break;
1586 case A2MP_GETINFO_REQ:
1587 err = getinfo_req(mgr, skb);
1588 break;
1589 case A2MP_GETAMPASSOC_REQ:
1590 err = getampassoc_req(mgr, skb);
1591 break;
1592 case A2MP_CREATEPHYSLINK_REQ:
1593 err = createphyslink_req(mgr, skb);
1594 break;
1595 case A2MP_DISCONNPHYSLINK_REQ:
1596 err = disconnphyslink_req(mgr, skb);
1597 break;
1598 case A2MP_CHANGE_RSP:
1599 case A2MP_DISCOVER_RSP:
1600 case A2MP_GETINFO_RSP:
1601 case A2MP_GETAMPASSOC_RSP:
1602 case A2MP_CREATEPHYSLINK_RSP:
1603 case A2MP_DISCONNPHYSLINK_RSP:
1604 err = a2mp_rsp(mgr, skb);
1605 break;
1606 default:
1607 BT_ERR("Unknown A2MP signaling command 0x%2.2x",
1608 hdr->code);
1609 skb_pull(skb, sizeof(*hdr));
1610 err = -EINVAL;
1611 break;
1612 }
1613 len = skb->len;
1614 }
1615
1616a2mp_finished:
1617 if (err && mgr) {
1618 struct a2mp_cmd_rej rej;
1619 rej.reason = cpu_to_le16(0);
1620 send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
1621 sizeof(rej), &rej);
1622 }
1623}
1624
1625/* L2CAP-A2MP interface */
1626
1627static int send_a2mp(struct socket *sock, u8 *data, int len)
1628{
1629 struct kvec iv = { data, len };
1630 struct msghdr msg;
1631
1632 memset(&msg, 0, sizeof(msg));
1633
1634 return kernel_sendmsg(sock, &msg, &iv, 1, len);
1635}
1636
1637static void data_ready_worker(struct work_struct *w)
1638{
1639 struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
1640 struct sock *sk = work->sk;
1641 struct sk_buff *skb;
1642
1643 /* skb_dequeue() is thread-safe */
1644 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1645 a2mp_receive(sk, skb);
1646 kfree_skb(skb);
1647 }
1648 sock_put(work->sk);
1649 kfree(work);
1650}
1651
1652static void data_ready(struct sock *sk, int bytes)
1653{
1654 struct amp_work_data_ready *work;
1655 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1656 if (work) {
1657 INIT_WORK((struct work_struct *) work, data_ready_worker);
1658 sock_hold(sk);
1659 work->sk = sk;
1660 work->bytes = bytes;
1661 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1662 kfree(work);
1663 sock_put(sk);
1664 }
1665 }
1666}
1667
/*
 * Deferred handler for A2MP socket state transitions.
 *
 * BT_CONNECTED: mark the manager connected, deliver any frame that was
 * deferred while the channel was coming up, and launch the first
 * pending context.  BT_CLOSED: release the socket (unless it is
 * already dead) and tear down the manager.
 */
static void state_change_worker(struct work_struct *w)
{
	struct amp_work_state_change *work = (struct amp_work_state_change *) w;
	struct amp_mgr *mgr;
	switch (work->sk->sk_state) {
	case BT_CONNECTED:
		/* socket is up */
		BT_DBG("CONNECTED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			mgr->connected = 1;
			if (mgr->skb) {
				/* replay the frame held back during setup */
				l2cap_recv_deferred_frame(work->sk, mgr->skb);
				mgr->skb = NULL;
			}
			launch_ctx(mgr);
		}
		break;

	case BT_CLOSED:
		/* connection is gone */
		BT_DBG("CLOSED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			/* a2mp_sock is dropped unconditionally; only the
			 * release itself is skipped for a dead socket
			 */
			if (!sock_flag(work->sk, SOCK_DEAD))
				sock_release(mgr->a2mp_sock);
			mgr->a2mp_sock = NULL;
			remove_amp_mgr(mgr);
		}
		break;

	default:
		/* something else happened */
		break;
	}
	sock_put(work->sk);
	kfree(work);
}
1706
1707static void state_change(struct sock *sk)
1708{
1709 struct amp_work_state_change *work;
1710 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1711 if (work) {
1712 INIT_WORK((struct work_struct *) work, state_change_worker);
1713 sock_hold(sk);
1714 work->sk = sk;
1715 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1716 kfree(work);
1717 sock_put(sk);
1718 }
1719 }
1720}
1721
1722static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
1723{
1724 int err;
1725 struct socket *sock;
1726 struct sockaddr_l2 addr;
1727 struct sock *sk;
1728 struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
1729 L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
1730 L2CAP_MODE_ERTM, 1, 0xFF, 1};
1731
1732
1733 err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
1734 BTPROTO_L2CAP, &sock);
1735
1736 if (err) {
1737 BT_ERR("sock_create_kern failed %d", err);
1738 return NULL;
1739 }
1740
1741 sk = sock->sk;
1742 sk->sk_data_ready = data_ready;
1743 sk->sk_state_change = state_change;
1744
1745 memset(&addr, 0, sizeof(addr));
1746 bacpy(&addr.l2_bdaddr, src);
1747 addr.l2_family = AF_BLUETOOTH;
1748 addr.l2_cid = L2CAP_CID_A2MP;
1749 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
1750 if (err) {
1751 BT_ERR("kernel_bind failed %d", err);
1752 sock_release(sock);
1753 return NULL;
1754 }
1755
1756 l2cap_fixed_channel_config(sk, &opts);
1757
1758 memset(&addr, 0, sizeof(addr));
1759 bacpy(&addr.l2_bdaddr, dst);
1760 addr.l2_family = AF_BLUETOOTH;
1761 addr.l2_cid = L2CAP_CID_A2MP;
1762 err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
1763 O_NONBLOCK);
1764 if ((err == 0) || (err == -EINPROGRESS))
1765 return sock;
1766 else {
1767 BT_ERR("kernel_connect failed %d", err);
1768 sock_release(sock);
1769 return NULL;
1770 }
1771}
1772
1773static void conn_ind_worker(struct work_struct *w)
1774{
1775 struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
1776 struct l2cap_conn *conn = work->conn;
1777 struct sk_buff *skb = work->skb;
1778 struct amp_mgr *mgr;
1779
1780 mgr = get_create_amp_mgr(conn, skb);
1781 BT_DBG("mgr %p", mgr);
1782 kfree(work);
1783}
1784
1785static void create_physical_worker(struct work_struct *w)
1786{
1787 struct amp_work_create_physical *work =
1788 (struct amp_work_create_physical *) w;
1789
1790 create_physical(work->conn, work->sk);
1791 sock_put(work->sk);
1792 kfree(work);
1793}
1794
1795static void accept_physical_worker(struct work_struct *w)
1796{
1797 struct amp_work_accept_physical *work =
1798 (struct amp_work_accept_physical *) w;
1799
1800 accept_physical(work->conn, work->id, work->sk);
1801 sock_put(work->sk);
1802 kfree(work);
1803}
1804
1805/* L2CAP Fixed Channel interface */
1806
1807void amp_conn_ind(struct l2cap_conn *conn, struct sk_buff *skb)
1808{
1809 struct amp_work_conn_ind *work;
1810 BT_DBG("conn %p, skb %p", conn, skb);
1811 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1812 if (work) {
1813 INIT_WORK((struct work_struct *) work, conn_ind_worker);
1814 work->conn = conn;
1815 work->skb = skb;
1816 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1817 kfree(work);
1818 }
1819}
1820
1821/* L2CAP Physical Link interface */
1822
1823void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
1824{
1825 struct amp_work_create_physical *work;
1826 BT_DBG("conn %p", conn);
1827 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1828 if (work) {
1829 INIT_WORK((struct work_struct *) work, create_physical_worker);
1830 work->conn = conn;
1831 work->sk = sk;
1832 sock_hold(sk);
1833 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1834 sock_put(sk);
1835 kfree(work);
1836 }
1837 }
1838}
1839
1840void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
1841{
1842 struct amp_work_accept_physical *work;
1843 BT_DBG("conn %p", conn);
1844
1845 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1846 if (work) {
1847 INIT_WORK((struct work_struct *) work, accept_physical_worker);
1848 work->conn = conn;
1849 work->sk = sk;
1850 work->id = id;
1851 sock_hold(sk);
1852 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1853 sock_put(sk);
1854 kfree(work);
1855 }
1856 }
1857}
1858
1859/* HCI interface */
1860
1861static void amp_cmd_cmplt_worker(struct work_struct *w)
1862{
1863 struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
1864 struct hci_dev *hdev = work->hdev;
1865 u16 opcode = work->opcode;
1866 struct sk_buff *skb = work->skb;
1867 struct amp_ctx *ctx;
1868
1869 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
1870 if (ctx)
1871 execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
1872 kfree_skb(skb);
1873 kfree(w);
1874}
1875
1876static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
1877 struct sk_buff *skb)
1878{
1879 struct amp_work_cmd_cmplt *work;
1880 struct sk_buff *skbc;
1881 BT_DBG("hdev %p opcode 0x%x skb %p len %d",
1882 hdev, opcode, skb, skb->len);
1883 skbc = skb_clone(skb, GFP_ATOMIC);
1884 if (!skbc)
1885 return;
1886 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1887 if (work) {
1888 INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
1889 work->hdev = hdev;
1890 work->opcode = opcode;
1891 work->skb = skbc;
1892 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1893 kfree(work);
1894 }
1895}
1896
1897static void amp_cmd_status_worker(struct work_struct *w)
1898{
1899 struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
1900 struct hci_dev *hdev = work->hdev;
1901 u16 opcode = work->opcode;
1902 u8 status = work->status;
1903 struct amp_ctx *ctx;
1904
1905 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
1906 if (ctx)
1907 execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
1908 kfree(w);
1909}
1910
1911static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
1912{
1913 struct amp_work_cmd_status *work;
1914 BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
1915 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1916 if (work) {
1917 INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
1918 work->hdev = hdev;
1919 work->opcode = opcode;
1920 work->status = status;
1921 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1922 kfree(work);
1923 }
1924}
1925
/*
 * Deferred dispatch of a generic AMP HCI event.
 *
 * HCI_EV_AMP_STATUS_CHANGE is handled inline: update the cached status
 * and broadcast an A2MP Change Notify to peers.  Any other event is
 * routed to the context waiting on that event code.  The cloned skb is
 * always consumed here.
 */
static void amp_event_worker(struct work_struct *w)
{
	struct amp_work_event *work = (struct amp_work_event *) w;
	struct hci_dev *hdev = work->hdev;
	u8 event = work->event;
	struct sk_buff *skb = work->skb;
	struct amp_ctx *ctx;

	if (event == HCI_EV_AMP_STATUS_CHANGE) {
		struct hci_ev_amp_status_change *ev;
		if (skb->len < sizeof(*ev))
			goto amp_event_finished;
		ev = (void *) skb->data;
		if (ev->status != 0)
			goto amp_event_finished;
		/* ignore no-op status changes */
		if (ev->amp_status == hdev->amp_status)
			goto amp_event_finished;
		hdev->amp_status = ev->amp_status;
		send_a2mp_change_notify();
		goto amp_event_finished;
	}
	ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
	if (ctx)
		execute_ctx(ctx, AMP_HCI_EVENT, skb);

amp_event_finished:
	kfree_skb(skb);
	kfree(w);
}
1955
1956static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
1957{
1958 struct amp_work_event *work;
1959 struct sk_buff *skbc;
1960 BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
1961 skbc = skb_clone(skb, GFP_ATOMIC);
1962 if (!skbc)
1963 return;
1964 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1965 if (work) {
1966 INIT_WORK((struct work_struct *) work, amp_event_worker);
1967 work->hdev = hdev;
1968 work->event = event;
1969 work->skb = skbc;
1970 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1971 kfree(work);
1972 }
1973}
1974
/* Deferred notifier work: broadcast an A2MP Change Notify to peers. */
static void amp_dev_event_worker(struct work_struct *w)
{
	send_a2mp_change_notify();
	kfree(w);
}
1980
1981static int amp_dev_event(struct notifier_block *this, unsigned long event,
1982 void *ptr)
1983{
1984 struct hci_dev *hdev = (struct hci_dev *) ptr;
1985 struct amp_work_event *work;
1986
1987 if (hdev->amp_type == HCI_BREDR)
1988 return NOTIFY_DONE;
1989
1990 switch (event) {
1991 case HCI_DEV_UNREG:
1992 case HCI_DEV_REG:
1993 case HCI_DEV_UP:
1994 case HCI_DEV_DOWN:
1995 BT_DBG("hdev %p event %ld", hdev, event);
1996 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1997 if (work) {
1998 INIT_WORK((struct work_struct *) work,
1999 amp_dev_event_worker);
2000 if (queue_work(amp_workqueue,
2001 (struct work_struct *) work) == 0)
2002 kfree(work);
2003 }
2004 }
2005 return NOTIFY_DONE;
2006}
2007
2008
2009/* L2CAP module init continued */
2010
/* Receives HCI device register/unregister/up/down notifications. */
static struct notifier_block amp_notifier = {
	.notifier_call = amp_dev_event
};

/* HCI callbacks for AMP command complete/status and AMP events. */
static struct amp_mgr_cb hci_amp = {
	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
	.amp_cmd_status_event = amp_cmd_status_evt,
	.amp_event = amp_evt
};
2020
2021int amp_init(void)
2022{
2023 hci_register_amp(&hci_amp);
2024 hci_register_notifier(&amp_notifier);
2025 amp_next_handle = 1;
2026 amp_workqueue = create_singlethread_workqueue("a2mp");
2027 if (!amp_workqueue)
2028 return -EPERM;
2029 return 0;
2030}
2031
/*
 * Module teardown: unregister callbacks first so no new work can be
 * queued, then drain and destroy the workqueue.
 */
void amp_exit(void)
{
	hci_unregister_amp(&hci_amp);
	hci_unregister_notifier(&amp_notifier);
	flush_workqueue(amp_workqueue);
	destroy_workqueue(amp_workqueue);
}