blob: 4918caab34de143577a3d0c99df701b293270cec [file] [log] [blame]
/*
   Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
*/
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/kernel.h>
18
19#include <linux/skbuff.h>
20#include <linux/list.h>
21#include <linux/workqueue.h>
22#include <linux/timer.h>
23
24#include <linux/crypto.h>
25#include <linux/scatterlist.h>
26#include <linux/err.h>
27#include <crypto/hash.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/amp.h>
33
34static struct workqueue_struct *amp_workqueue;
35
36LIST_HEAD(amp_mgr_list);
37DEFINE_RWLOCK(amp_mgr_list_lock);
38
39static int send_a2mp(struct socket *sock, u8 *data, int len);
40
41static void ctx_timeout(unsigned long data);
42
43static void launch_ctx(struct amp_mgr *mgr);
44static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
45static int kill_ctx(struct amp_ctx *ctx);
46static int cancel_ctx(struct amp_ctx *ctx);
47
48static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
49
/* Unlink an AMP manager from the global list, kill every context still
 * attached to it, then free the manager and its cached controller.
 * The ctx list lock is released around each kill_ctx() call because
 * killing a context ends in destroy_ctx(), which takes the same lock
 * (as a writer) to unlink the context from this list.
 */
static void remove_amp_mgr(struct amp_mgr *mgr)
{
	BT_DBG("mgr %p", mgr);

	write_lock(&amp_mgr_list_lock);
	list_del(&mgr->list);
	write_unlock(&amp_mgr_list_lock);

	read_lock(&mgr->ctx_list_lock);
	while (!list_empty(&mgr->ctx_list)) {
		struct amp_ctx *ctx;
		ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
		read_unlock(&mgr->ctx_list_lock);
		BT_DBG("kill ctx %p", ctx);
		kill_ctx(ctx);
		read_lock(&mgr->ctx_list_lock);
	}
	read_unlock(&mgr->ctx_list_lock);

	kfree(mgr->ctrls);

	kfree(mgr);
}
73
74static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
75{
76 struct amp_mgr *mgr;
77 struct amp_mgr *found = NULL;
78
Peter Krystadf5289202011-11-14 15:11:22 -080079 read_lock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080 list_for_each_entry(mgr, &amp_mgr_list, list) {
81 if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
82 found = mgr;
83 break;
84 }
85 }
Peter Krystadf5289202011-11-14 15:11:22 -080086 read_unlock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087 return found;
88}
89
/* Find the AMP manager bound to @conn, creating and registering a new
 * one (including its fixed-channel A2MP socket) when none exists.
 * Returns NULL on allocation or socket-open failure.
 *
 * NOTE(review): the list lock is dropped between the lookup loop and
 * the list_add() below, so two concurrent callers could each create a
 * manager for the same conn — confirm callers serialize this path.
 */
static struct amp_mgr *get_create_amp_mgr(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	struct amp_mgr *mgr;

	write_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->l2cap_conn == conn) {
			BT_DBG("conn %p found %p", conn, mgr);
			write_unlock(&amp_mgr_list_lock);
			goto gc_finished;
		}
	}
	write_unlock(&amp_mgr_list_lock);

	mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
	if (!mgr)
		return NULL;

	mgr->l2cap_conn = conn;
	mgr->next_ident = 1;	/* A2MP ident 0 is reserved */
	INIT_LIST_HEAD(&mgr->ctx_list);
	rwlock_init(&mgr->ctx_list_lock);
	mgr->skb = skb;
	BT_DBG("conn %p mgr %p", conn, mgr);
	mgr->a2mp_sock = open_fixed_channel(conn->src, conn->dst);
	if (!mgr->a2mp_sock) {
		kfree(mgr);
		return NULL;
	}
	write_lock(&amp_mgr_list_lock);
	list_add(&(mgr->list), &amp_mgr_list);
	write_unlock(&amp_mgr_list_lock);

gc_finished:
	return mgr;
}
127
128static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
129{
130 if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
131 return mgr->ctrls;
132 else
133 return NULL;
134}
135
136static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
137{
138 struct amp_ctrl *ctrl;
139
140 BT_DBG("mgr %p, id %d", mgr, id);
141 if ((mgr->ctrls) && (mgr->ctrls->id == id))
142 ctrl = mgr->ctrls;
143 else {
144 kfree(mgr->ctrls);
145 ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
146 if (ctrl) {
147 ctrl->mgr = mgr;
148 ctrl->id = id;
149 }
150 mgr->ctrls = ctrl;
151 }
152
153 return ctrl;
154}
155
156static struct amp_ctx *create_ctx(u8 type, u8 state)
157{
158 struct amp_ctx *ctx = NULL;
159
160 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
161 if (ctx) {
162 ctx->type = type;
163 ctx->state = state;
164 init_timer(&(ctx->timer));
165 ctx->timer.function = ctx_timeout;
166 ctx->timer.data = (unsigned long) ctx;
167 }
168 BT_DBG("ctx %p, type %d", ctx, type);
169 return ctx;
170}
171
172static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
173{
174 BT_DBG("ctx %p", ctx);
Peter Krystadf5289202011-11-14 15:11:22 -0800175 write_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176 list_add(&ctx->list, &mgr->ctx_list);
Peter Krystadf5289202011-11-14 15:11:22 -0800177 write_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 ctx->mgr = mgr;
179 execute_ctx(ctx, AMP_INIT, 0);
180}
181
/* Stop a context's timer, unlink it from its manager and free it.
 * If another context was parked behind this one (ctx->deferred), start
 * that context's state machine now.
 */
static void destroy_ctx(struct amp_ctx *ctx)
{
	struct amp_mgr *mgr = ctx->mgr;

	BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
	del_timer(&ctx->timer);
	write_lock(&mgr->ctx_list_lock);
	list_del(&ctx->list);
	write_unlock(&mgr->ctx_list_lock);
	if (ctx->deferred)
		execute_ctx(ctx->deferred, AMP_INIT, 0);
	kfree(ctx);
}
195
196static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
197{
198 struct amp_ctx *fnd = NULL;
199 struct amp_ctx *ctx;
200
Peter Krystadf5289202011-11-14 15:11:22 -0800201 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 list_for_each_entry(ctx, &mgr->ctx_list, list) {
203 if (ctx->type == type) {
204 fnd = ctx;
205 break;
206 }
207 }
Peter Krystadf5289202011-11-14 15:11:22 -0800208 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209 return fnd;
210}
211
212static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
213{
214 struct amp_mgr *mgr = cur->mgr;
215 struct amp_ctx *fnd = NULL;
216 struct amp_ctx *ctx;
217
Peter Krystadf5289202011-11-14 15:11:22 -0800218 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219 list_for_each_entry(ctx, &mgr->ctx_list, list) {
220 if ((ctx->type == type) && (ctx != cur)) {
221 fnd = ctx;
222 break;
223 }
224 }
Peter Krystadf5289202011-11-14 15:11:22 -0800225 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 return fnd;
227}
228
229static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
230{
231 struct amp_ctx *fnd = NULL;
232 struct amp_ctx *ctx;
233
Peter Krystadf5289202011-11-14 15:11:22 -0800234 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235 list_for_each_entry(ctx, &mgr->ctx_list, list) {
236 if ((ctx->evt_type & AMP_A2MP_RSP) &&
237 (ctx->rsp_ident == ident)) {
238 fnd = ctx;
239 break;
240 }
241 }
Peter Krystadf5289202011-11-14 15:11:22 -0800242 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700243 return fnd;
244}
245
/* Search every manager's context list for a context bound to @hdev that
 * is waiting on @evt_type: for command status/complete events the
 * context's opcode must match @evt_value, for plain HCI events its
 * event code must match the low byte of @evt_value.
 * Returns the matching context or NULL.
 */
static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
					u16 evt_value)
{
	struct amp_mgr *mgr;
	struct amp_ctx *fnd = NULL;

	read_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		struct amp_ctx *ctx;
		read_lock(&mgr->ctx_list_lock);
		list_for_each_entry(ctx, &mgr->ctx_list, list) {
			struct hci_dev *ctx_hdev;
			/* resolve the context's controller id to a device;
			 * takes a reference that is dropped below */
			ctx_hdev = hci_dev_get(ctx->id);
			if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
				switch (evt_type) {
				case AMP_HCI_CMD_STATUS:
				case AMP_HCI_CMD_CMPLT:
					if (ctx->opcode == evt_value)
						fnd = ctx;
					break;
				case AMP_HCI_EVENT:
					if (ctx->evt_code == (u8) evt_value)
						fnd = ctx;
					break;
				}
			}
			if (ctx_hdev)
				hci_dev_put(ctx_hdev);

			if (fnd)
				break;
		}
		read_unlock(&mgr->ctx_list_lock);
	}
	read_unlock(&amp_mgr_list_lock);
	return fnd;
}
283
284static inline u8 next_ident(struct amp_mgr *mgr)
285{
286 if (++mgr->next_ident == 0)
287 mgr->next_ident = 1;
288 return mgr->next_ident;
289}
290
291static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
292 u16 len, void *data, u16 len2, void *data2)
293{
294 struct a2mp_cmd_hdr *hdr;
295 int plen;
296 u8 *p, *cmd;
297
298 BT_DBG("ident %d code 0x%02x", ident, code);
299 if (!mgr->a2mp_sock)
300 return;
301 plen = sizeof(*hdr) + len + len2;
302 cmd = kzalloc(plen, GFP_ATOMIC);
303 if (!cmd)
304 return;
305 hdr = (struct a2mp_cmd_hdr *) cmd;
306 hdr->code = code;
307 hdr->ident = ident;
308 hdr->len = cpu_to_le16(len+len2);
309 p = cmd + sizeof(*hdr);
310 memcpy(p, data, len);
311 p += len;
312 memcpy(p, data2, len2);
313 send_a2mp(mgr->a2mp_sock, cmd, plen);
314 kfree(cmd);
315}
316
/* Convenience wrapper: send an A2MP command with a single payload. */
static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
				u8 code, u16 len, void *data)
{
	send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
}
322
/* Handle an incoming A2MP Command Reject: kill the context that was
 * waiting on the rejected ident, if any.  Returns -EINVAL on a short
 * packet, 0 otherwise.
 */
static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cmd_rej *rej;
	struct amp_ctx *ctx;

	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
	rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*rej))
		return -EINVAL;
	BT_DBG("reason %d", le16_to_cpu(rej->reason));
	ctx = get_ctx_a2mp(mgr, hdr->ident);
	if (ctx)
		kill_ctx(ctx);
	skb_pull(skb, sizeof(*rej));
	return 0;
}
340
341static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
342 void *msg)
343{
344 struct a2mp_cl clist[16];
345 struct a2mp_cl *cl;
346 struct hci_dev *hdev;
347 int num_ctrls = 1, id;
348
349 cl = clist;
350 cl->id = 0;
351 cl->type = 0;
352 cl->status = 1;
353
354 for (id = 0; id < 16; ++id) {
355 hdev = hci_dev_get(id);
356 if (hdev) {
357 if ((hdev->amp_type != HCI_BREDR) &&
358 test_bit(HCI_UP, &hdev->flags)) {
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800359 (cl + num_ctrls)->id = hdev->id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360 (cl + num_ctrls)->type = hdev->amp_type;
361 (cl + num_ctrls)->status = hdev->amp_status;
362 ++num_ctrls;
363 }
364 hci_dev_put(hdev);
365 }
366 }
367 send_a2mp_cmd2(mgr, ident, code, len, msg,
368 num_ctrls*sizeof(*cl), clist);
369
370 return 0;
371}
372
373static void send_a2mp_change_notify(void)
374{
375 struct amp_mgr *mgr;
376
Peter Krystadf5289202011-11-14 15:11:22 -0800377 read_lock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378 list_for_each_entry(mgr, &amp_mgr_list, list) {
379 if (mgr->discovered)
380 send_a2mp_cl(mgr, next_ident(mgr),
381 A2MP_CHANGE_NOTIFY, 0, NULL);
382 }
Peter Krystadf5289202011-11-14 15:11:22 -0800383 read_unlock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384}
385
/* Handle an A2MP Discover Request: consume the request, including any
 * chained extended-feature words, and reply with a Discover Response
 * carrying our controller list.  Returns -EINVAL on a truncated packet.
 */
static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_discover_req *req;
	u16 *efm;
	struct a2mp_discover_rsp rsp;

	req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*req))
		return -EINVAL;
	efm = (u16 *) skb_pull(skb, sizeof(*req));

	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
		le16_to_cpu(req->ext_feat));

	/* bit 15 of the extended-feature mask flags another 16-bit word */
	while (le16_to_cpu(req->ext_feat) & 0x8000) {
		if (skb->len < sizeof(*efm))
			return -EINVAL;
		req->ext_feat = *efm;
		BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
		efm = (u16 *) skb_pull(skb, sizeof(*efm));
	}

	rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp.ext_feat = 0;

	/* remember discovery so change notifications get sent later */
	mgr->discovered = 1;

	return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
				sizeof(rsp), &rsp);
}
417
418static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
419{
420 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
421 struct a2mp_cl *cl;
422
423 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
424 while (skb->len >= sizeof(*cl)) {
425 struct amp_ctrl *ctrl;
426 if (cl->id != 0) {
427 ctrl = get_create_ctrl(mgr, cl->id);
428 if (ctrl != NULL) {
429 ctrl->type = cl->type;
430 ctrl->status = cl->status;
431 }
432 }
433 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
434 }
435
436 /* TODO find controllers in manager that were not on received */
437 /* controller list and destroy them */
438 send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);
439
440 return 0;
441}
442
/* Handle an A2MP Get Info Request for controller @id: reply with its
 * bandwidth/latency/capability parameters, or status 1 when the id is
 * unknown or refers to the BR/EDR controller.
 */
static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	u8 *data;
	int id;
	struct hci_dev *hdev;
	struct a2mp_getinfo_rsp rsp;

	data = (u8 *) skb_pull(skb, sizeof(*hdr));
	if (le16_to_cpu(hdr->len) < sizeof(*data))
		return -EINVAL;
	if (skb->len < sizeof(*data))
		return -EINVAL;
	id = *data;
	skb_pull(skb, sizeof(*data));
	rsp.id = id;
	rsp.status = 1;		/* failure until the id proves valid */

	BT_DBG("id %d", id);
	hdev = hci_dev_get(id);

	if (hdev && hdev->amp_type != HCI_BREDR) {
		rsp.status = 0;
		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
	}

	send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);

	if (hdev)
		hci_dev_put(hdev);

	return 0;
}
480
481static void create_physical(struct l2cap_conn *conn, struct sock *sk)
482{
483 struct amp_mgr *mgr;
484 struct amp_ctx *ctx = NULL;
485
486 BT_DBG("conn %p", conn);
487 mgr = get_create_amp_mgr(conn, NULL);
488 if (!mgr)
489 goto cp_finished;
490 BT_DBG("mgr %p", mgr);
491 ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
492 if (!ctx)
493 goto cp_finished;
494 ctx->sk = sk;
495 sock_hold(sk);
496 start_ctx(mgr, ctx);
497 return;
498
499cp_finished:
500 l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
501}
502
503static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
504{
505 struct amp_mgr *mgr;
506 struct hci_dev *hdev;
507 struct hci_conn *conn;
508 struct amp_ctx *aplctx = NULL;
509 u8 remote_id = 0;
510 int result = -EINVAL;
511
512 BT_DBG("lcon %p", lcon);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800513 hdev = hci_dev_get(id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700514 if (!hdev)
515 goto ap_finished;
516 BT_DBG("hdev %p", hdev);
Peter Krystadf7dcc792011-11-14 15:11:58 -0800517 mgr = get_create_amp_mgr(lcon, NULL);
518 if (!mgr)
519 goto ap_finished;
520 BT_DBG("mgr %p", mgr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
522 &mgr->l2cap_conn->hcon->dst);
523 if (conn) {
524 BT_DBG("conn %p", hdev);
525 result = 0;
526 remote_id = conn->dst_id;
527 goto ap_finished;
528 }
529 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
530 if (!aplctx)
531 goto ap_finished;
532 aplctx->sk = sk;
533 sock_hold(sk);
534 return;
535
536ap_finished:
Peter Krystadf7dcc792011-11-14 15:11:58 -0800537 if (hdev)
538 hci_dev_put(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700539 l2cap_amp_physical_complete(result, id, remote_id, sk);
540}
541
542static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
543{
544 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
545 struct amp_ctx *ctx;
546 struct a2mp_getampassoc_req *req;
547
548 if (hdr->len < sizeof(*req))
549 return -EINVAL;
550 req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
551 skb_pull(skb, sizeof(*req));
552
553 ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
554 if (!ctx)
555 return -ENOMEM;
556 ctx->id = req->id;
557 ctx->d.gaa.req_ident = hdr->ident;
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800558 ctx->hdev = hci_dev_get(ctx->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559 if (ctx->hdev)
560 ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
561 GFP_ATOMIC);
562 start_ctx(mgr, ctx);
563 return 0;
564}
565
/* State machine for reading the local AMP assoc in fragments via
 * HCI Read Local AMP Assoc, then answering the A2MP Get AMP Assoc
 * Request.  Returns 1 when the context is finished, 0 to keep waiting.
 */
static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct sk_buff *skb = (struct sk_buff *) data;
	struct hci_cp_read_local_amp_assoc cp;
	struct hci_rp_read_local_amp_assoc *rp;
	struct a2mp_getampassoc_rsp rsp;
	u16 rem_len;
	u16 frag_len;

	rsp.status = 1;		/* failure unless the full assoc is read */
	if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
		goto gaa_finished;

	switch (ctx->state) {
	case AMP_GAA_INIT:
		/* request the first assoc fragment */
		ctx->state = AMP_GAA_RLAA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
		ctx->d.gaa.len_so_far = 0;
		cp.phy_handle = 0;
		cp.len_so_far = 0;
		cp.max_len = ctx->hdev->amp_assoc_size;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		break;

	case AMP_GAA_RLAA_COMPLETE:
		/* command complete: status(1) + phy_handle(1) + rem_len(2) */
		if (skb->len < 4)
			goto gaa_finished;
		rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
		if (rp->status)
			goto gaa_finished;
		rem_len = le16_to_cpu(rp->rem_len);
		skb_pull(skb, 4);
		frag_len = skb->len;

		if (ctx->d.gaa.len_so_far + rem_len <=
				ctx->hdev->amp_assoc_size) {
			struct hci_cp_read_local_amp_assoc cp;
			u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
			memcpy(assoc, rp->frag, frag_len);
			/* NOTE(review): progress advances by rem_len, not
			 * frag_len — correct only if each response's rem_len
			 * equals the fragment it carries; confirm against
			 * controller behavior for multi-fragment assocs. */
			ctx->d.gaa.len_so_far += rem_len;
			rem_len -= frag_len;
			if (rem_len == 0) {
				rsp.status = 0;
				goto gaa_finished;
			}
			/* more assoc data to read */
			cp.phy_handle = 0;
			cp.len_so_far = ctx->d.gaa.len_so_far;
			cp.max_len = ctx->hdev->amp_assoc_size;
			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		}
		break;

	default:
		goto gaa_finished;
		break;
	}
	return 0;

gaa_finished:
	rsp.id = ctx->id;
	send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
			sizeof(rsp), &rsp,
			ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
	kfree(ctx->d.gaa.assoc);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	return 1;
}
636
/* Completion context handed to the async HMAC request callback. */
struct hmac_sha256_result {
	struct completion completion;
	int err;	/* final status reported by the crypto layer */
};
641
642static void hmac_sha256_final(struct crypto_async_request *req, int err)
643{
644 struct hmac_sha256_result *r = req->data;
645 if (err == -EINPROGRESS)
646 return;
647 r->err = err;
648 complete(&r->completion);
649}
650
/* Compute HMAC-SHA256(@key, @plaintext) and copy the first @outlen
 * digest bytes into @output (zeroed first).  Uses the async ahash API,
 * waiting for completion when the request goes asynchronous.
 * Returns 0 on success or a negative errno.
 * May sleep (GFP_KERNEL allocations, interruptible wait).
 *
 * NOTE(review): @outlen must not exceed sizeof(hash_result) (64);
 * bytes past the 32-byte SHA-256 digest would be zero padding —
 * callers in this file pass 32.
 */
int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
		u8 *output, u8 outlen)
{
	int ret = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha256_result tresult;
	void *hash_buff = NULL;

	unsigned char hash_result[64];
	int i;

	memset(output, 0, outlen);

	init_completion(&tresult.completion);

	tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
				CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm)) {
		BT_DBG("crypto_alloc_ahash failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		BT_DBG("failed to allocate request for hmac(sha256)");
		ret = -ENOMEM;
		goto err_req;
	}

	/* completion callback fires when an async request finishes */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					hmac_sha256_final, &tresult);

	/* copy the input so the scatterlist has a buffer we own */
	hash_buff = kzalloc(psize, GFP_KERNEL);
	if (!hash_buff) {
		BT_DBG("failed to kzalloc hash_buff");
		ret = -ENOMEM;
		goto err_hash_buf;
	}

	memset(hash_result, 0, 64);
	memcpy(hash_buff, plaintext, psize);
	sg_init_one(&sg, hash_buff, psize);

	if (ksize) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, key, ksize);

		if (ret) {
			BT_DBG("crypto_ahash_setkey failed");
			goto err_setkey;
		}
	}

	ahash_request_set_crypt(req, &sg, hash_result, psize);
	ret = crypto_ahash_digest(req);

	BT_DBG("ret 0x%x", ret);

	switch (ret) {
	case 0:
		/* completed synchronously */
		for (i = 0; i < outlen; i++)
			output[i] = hash_result[i];
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/* async or backlogged: wait for the callback */
		ret = wait_for_completion_interruptible(&tresult.completion);
		if (!ret && !tresult.err) {
			INIT_COMPLETION(tresult.completion);
			break;
		} else {
			BT_DBG("wait_for_completion_interruptible failed");
			if (!ret)
				ret = tresult.err;
			goto out;
		}
	default:
		goto out;
	}

out:
err_setkey:
	kfree(hash_buff);
err_hash_buf:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return ret;
}
743
744static void show_key(u8 *k)
745{
746 int i = 0;
747 for (i = 0; i < 32; i += 8)
748 BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
749 *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
750 *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
751}
752
/* Derive the AMP physical-link key from the BR/EDR link key:
 *   gamp_key = HMAC-SHA256(link_key||link_key, "gamp")
 *   b802_key = HMAC-SHA256(gamp_key, "802b")   (unless key_type == 3)
 * Requires a sufficiently secure link (key_type >= 3).  On success
 * *data holds the 32-byte key, *len = 32, *type = the BR/EDR key type.
 * Returns 0 on success, -EACCES when the link security is inadequate,
 * or the hmac_sha256() error.
 */
static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
	u8 bt2_key[32];
	u8 gamp_key[32];
	u8 b802_key[32];
	int result;

	if (!hci_conn_check_link_mode(conn))
		return -EACCES;

	BT_DBG("key_type %d", conn->key_type);
	if (conn->key_type < 3)
		return -EACCES;

	*type = conn->key_type;
	*len = 32;
	/* bt2_key is the 16-byte link key repeated twice */
	memcpy(&bt2_key[0], conn->link_key, 16);
	memcpy(&bt2_key[16], conn->link_key, 16);
	result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
	if (result)
		goto ps_finished;

	if (conn->key_type == 3) {
		/* key_type 3: hand out the generic AMP key directly */
		BT_DBG("gamp_key");
		show_key(gamp_key);
		memcpy(data, gamp_key, 32);
		goto ps_finished;
	}

	result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
	if (result)
		goto ps_finished;

	BT_DBG("802b_key");
	show_key(b802_key);
	memcpy(data, b802_key, 32);

ps_finished:
	return result;
}
793
/* Monotonically increasing physical link handle; 0 is never issued. */
static u8 amp_next_handle;
static inline u8 physlink_handle(struct hci_dev *hdev)
{
	/* TODO amp_next_handle should be part of hci_dev */
	if (amp_next_handle == 0)
		amp_next_handle = 1;
	return amp_next_handle++;
}
802
803/* Start an Accept Physical Link sequence */
804static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
805{
806 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
807 struct amp_ctx *ctx = NULL;
808 struct a2mp_createphyslink_req *req;
809
810 if (hdr->len < sizeof(*req))
811 return -EINVAL;
812 req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
813 skb_pull(skb, sizeof(*req));
814 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
815
816 /* initialize the context */
817 ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
818 if (!ctx)
819 return -ENOMEM;
820 ctx->d.apl.req_ident = hdr->ident;
821 ctx->d.apl.remote_id = req->local_id;
822 ctx->id = req->remote_id;
823
824 /* add the supplied remote assoc to the context */
825 ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
826 if (ctx->d.apl.remote_assoc)
827 memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
828 ctx->d.apl.len_so_far = 0;
829 ctx->d.apl.rem_len = skb->len;
830 skb_pull(skb, skb->len);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800831 ctx->hdev = hci_dev_get(ctx->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 start_ctx(mgr, ctx);
833 return 0;
834}
835
/* State machine for the Accept Physical Link sequence (responder side
 * of a remote Create Physical Link request):
 *   INIT        -> issue HCI Accept Physical Link (after collision and
 *                  duplicate checks and key derivation)
 *   APL_STATUS  -> on command status, send the A2MP response and start
 *                  writing the remote assoc in <=248-byte fragments
 *   WRA_COMPLETE-> continue fragments, then wait for link complete
 *   PL_COMPLETE -> record the peer on the new hci_conn and finish
 * Returns 1 when the context is finished and may be destroyed, 0 to
 * keep waiting for the next event.
 */
static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct sk_buff *skb = data;
	struct hci_cp_accept_phys_link acp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_cmd_status *cs = data;
	struct hci_ev_phys_link_complete *ev;
	struct a2mp_createphyslink_rsp rsp;
	struct amp_ctx *cplctx;
	struct amp_ctx *aplctx;
	u16 frag_len;
	struct hci_conn *conn;
	int result;

	BT_DBG("state %d", ctx->state);
	result = -EINVAL;
	rsp.status = 1;		/* Invalid Controller ID */
	if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
		goto apl_finished;
	if (evt_type == AMP_KILLED) {
		result = -EAGAIN;
		rsp.status = 4;	/* Disconnect request received */
		goto apl_finished;
	}
	if (!ctx->d.apl.remote_assoc) {
		result = -ENOMEM;
		rsp.status = 2;	/* Unable to Start */
		goto apl_finished;
	}

	switch (ctx->state) {
	case AMP_APL_INIT:
		BT_DBG("local_id %d, remote_id %d",
			ctx->id, ctx->d.apl.remote_id);
		/* a physical link to this peer must not already exist */
		conn = hci_conn_hash_lookup_id(ctx->hdev,
			&ctx->mgr->l2cap_conn->hcon->dst,
			ctx->d.apl.remote_id);
		if (conn) {
			result = -EEXIST;
			rsp.status = 5;	/* Already Exists */
			goto apl_finished;
		}

		/* defer behind an accept already running for this peer */
		aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
		if ((aplctx) &&
			(aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			BT_DBG("deferred to %p", aplctx);
			aplctx->deferred = ctx;
			break;
		}

		/* collision with our own create to the same peer: the
		 * lower bdaddr wins, the loser cancels and defers */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if ((cplctx) &&
			(cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
			BT_DBG("local %s remote %s",
				batostr(&bcon->hdev->bdaddr),
				batostr(&bcon->dst));
			if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
				(bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
				BT_DBG("COLLISION LOSER");
				cplctx->deferred = ctx;
				cancel_ctx(cplctx);
				break;
			} else {
				BT_DBG("COLLISION WINNER");
				result = -EISCONN;
				rsp.status = 3;	/* Collision */
				goto apl_finished;
			}
		}

		/* derive the link key for the accept command */
		result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
						&acp.key_len, &acp.type);
		if (result) {
			BT_DBG("SECURITY");
			rsp.status = 6;	/* Security Violation */
			goto apl_finished;
		}

		ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
		ctx->state = AMP_APL_APL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
		acp.phy_handle = ctx->d.apl.phy_handle;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
		break;

	case AMP_APL_APL_STATUS:
		if (cs->status != 0)
			goto apl_finished;
		/* PAL will accept link, send a2mp response */
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		rsp.status = 0;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);

		/* send the first assoc fragment */
		wcp.phy_handle = ctx->d.apl.phy_handle;
		wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
		wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
		ctx->state = AMP_APL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = phy_handle(1) + len_so_far(2) + rem_len(2) header */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_APL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto apl_finished;
		if (wrp->phy_handle != ctx->d.apl.phy_handle)
			goto apl_finished;
		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		ctx->d.apl.len_so_far += frag_len;
		ctx->d.apl.rem_len -= frag_len;
		if (ctx->d.apl.rem_len > 0) {
			u8 *assoc;
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.apl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
			assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
			memcpy(wcp.frag, assoc, frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* wait for physical link complete event */
		ctx->state = AMP_APL_PL_COMPLETE;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
		break;

	case AMP_APL_PL_COMPLETE:
		/* physical link complete event received */
		if (skb->len < sizeof(*ev))
			goto apl_finished;
		ev = (struct hci_ev_phys_link_complete *) skb->data;
		if (ev->phy_handle != ctx->d.apl.phy_handle)
			break;
		if (ev->status != 0)
			goto apl_finished;
		conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
		if (!conn)
			goto apl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
		conn->dst_id = ctx->d.apl.remote_id;
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		goto apl_finished;
		break;

	default:
		goto apl_finished;
		break;
	}
	return 0;

apl_finished:
	/* report to the waiting socket (if any) and, on failure before the
	 * A2MP response was sent, send the failing response now */
	if (ctx->sk)
		l2cap_amp_physical_complete(result, ctx->id,
						ctx->d.apl.remote_id, ctx->sk);
	if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
	}
	kfree(ctx->d.apl.remote_assoc);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	return 1;
}
1018
/* Abort an in-progress Create Physical Link by disconnecting the
 * physical link with @reason; the context then waits for the
 * disconnect-complete event in the AMP_CPL_PL_CANCEL state.
 */
static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
{
	struct hci_cp_disconn_phys_link dcp;

	ctx->state = AMP_CPL_PL_CANCEL;
	ctx->evt_type = AMP_HCI_EVENT;
	ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
	dcp.phy_handle = ctx->d.cpl.phy_handle;
	dcp.reason = reason;
	hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
}
1030
/* State machine for the initiator side of AMP physical link creation.
 * Drives the full A2MP/HCI sequence: discover remote controllers, get info
 * and AMP assoc from the chosen remote, issue HCI Create Physical Link,
 * exchange assoc fragments, then wait for both the A2MP response and the
 * HCI physical link complete event.
 *
 * @ctx:      creation context (ctx->d.cpl holds per-attempt state)
 * @evt_type: AMP_* event class that woke this context
 * @data:     sk_buff for A2MP/HCI events, hci_ev_cmd_status for cmd status
 *
 * Returns 0 while the context should stay alive (more events expected),
 * 1 when it is finished and must be destroyed by the caller.
 */
static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct amp_ctrl *ctrl;
	struct sk_buff *skb = data;	/* valid only for skb-carrying events */
	struct a2mp_cmd_hdr *hdr;
	struct hci_ev_cmd_status *cs = data;	/* valid for AMP_HCI_CMD_STATUS */
	struct amp_ctx *cplctx;
	struct a2mp_discover_req dreq;
	struct a2mp_discover_rsp *drsp;
	u16 *efm;
	struct a2mp_getinfo_req greq;
	struct a2mp_getinfo_rsp *grsp;
	struct a2mp_cl *cl;
	struct a2mp_getampassoc_req areq;
	struct a2mp_getampassoc_rsp *arsp;
	struct hci_cp_create_phys_link cp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_channel_selected *cev;
	struct hci_cp_read_local_amp_assoc rcp;
	struct hci_rp_read_local_amp_assoc *rrp;
	struct a2mp_createphyslink_req creq;
	struct a2mp_createphyslink_rsp *crsp;
	struct hci_ev_phys_link_complete *pev;
	struct hci_ev_disconn_phys_link_complete *dev;
	u8 *assoc, *rassoc, *lassoc;
	u16 frag_len;
	u16 rem_len;
	int result = -EAGAIN;	/* default outcome if killed mid-sequence */
	struct hci_conn *conn;

	BT_DBG("state %d", ctx->state);
	if (evt_type == AMP_KILLED)
		goto cpl_finished;

	if (evt_type == AMP_CANCEL) {
		/* Before CPL_STATUS no physical link exists yet, and once
		 * PL_COMPLETE has seen its HCI event there is nothing left
		 * to cancel - just finish.  Otherwise disconnect the link. */
		if ((ctx->state < AMP_CPL_CPL_STATUS) ||
			((ctx->state == AMP_CPL_PL_COMPLETE) &&
			!(ctx->evt_type & AMP_HCI_EVENT)))
			goto cpl_finished;

		/* 0x16 - presumably "terminated by local host"; verify
		 * against HCI error code table */
		cancel_cpl_ctx(ctx, 0x16);
		return 0;
	}

	switch (ctx->state) {
	case AMP_CPL_INIT:
		/* serialize: only one create-physical-link at a time,
		 * defer behind any context already running */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if (cplctx) {
			BT_DBG("deferred to %p", cplctx);
			cplctx->deferred = ctx;
			break;
		}
		/* kick off remote controller discovery */
		ctx->state = AMP_CPL_DISC_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
		dreq.ext_feat = 0;
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
							sizeof(dreq), &dreq);
		break;

	case AMP_CPL_DISC_RSP:
		/* parse discover response: header, fixed part, optional
		 * extended-feature words, then the controller list */
		drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
		if (skb->len < (sizeof(*drsp))) {
			result = -EINVAL;
			goto cpl_finished;
		}

		efm = (u16 *) skb_pull(skb, sizeof(*drsp));
		BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
						le16_to_cpu(drsp->ext_feat));

		/* bit 15 of ext_feat means another extended-feature word
		 * follows; skip them all */
		while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
			if (skb->len < sizeof(*efm)) {
				result = -EINVAL;
				goto cpl_finished;
			}
			drsp->ext_feat = *efm;
			BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
			efm = (u16 *) skb_pull(skb, sizeof(*efm));
		}
		cl = (struct a2mp_cl *) efm;

		/* find the first remote and local controller with the
		 * same type
		 */
		greq.id = 0;
		result = -ENODEV;
		while (skb->len >= sizeof(*cl)) {
			/* id 0 is the BR/EDR controller - skip it; only
			 * take the first usable match (greq.id == 0) */
			if ((cl->id != 0) && (greq.id == 0)) {
				struct hci_dev *hdev;
				hdev = hci_dev_get_type(cl->type);
				if (hdev) {
					struct hci_conn *conn;
					ctx->hdev = hdev;
					ctx->id = hdev->id;
					ctx->d.cpl.remote_id = cl->id;
					conn = hci_conn_hash_lookup_ba(hdev,
						ACL_LINK,
						&ctx->mgr->l2cap_conn->hcon->dst);
					/* result == 0 here means a physical
					 * link already exists - bail out
					 * below */
					if (conn) {
						BT_DBG("PL_COMPLETE exists %x",
							(int) conn->handle);
						result = 0;
					}
					ctrl = get_create_ctrl(ctx->mgr,
								cl->id);
					if (ctrl) {
						ctrl->type = cl->type;
						ctrl->status = cl->status;
					}
					greq.id = cl->id;
				}
			}
			cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
		}
		/* no matching controller, or link already up */
		if ((!greq.id) || (!result))
			goto cpl_finished;
		/* query the chosen remote controller */
		ctx->state = AMP_CPL_GETINFO_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
							sizeof(greq), &greq);
		break;

	case AMP_CPL_GETINFO_RSP:
		/* cache the remote controller's capabilities */
		if (skb->len < sizeof(*grsp))
			goto cpl_finished;
		grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
		if (grsp->status)
			goto cpl_finished;
		if (grsp->id != ctx->d.cpl.remote_id)
			goto cpl_finished;
		ctrl = get_ctrl(ctx->mgr, grsp->id);
		if (!ctrl)
			goto cpl_finished;
		ctrl->status = grsp->status;
		ctrl->total_bw = le32_to_cpu(grsp->total_bw);
		ctrl->max_bw = le32_to_cpu(grsp->max_bw);
		ctrl->min_latency = le32_to_cpu(grsp->min_latency);
		ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
		ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
		skb_pull(skb, sizeof(*grsp));

		/* cap for the local assoc we will read back later */
		ctx->d.cpl.max_len = ctrl->max_assoc_size;

		/* setup up GAA request */
		areq.id = ctx->d.cpl.remote_id;

		/* advance context state */
		ctx->state = AMP_CPL_GAA_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
							sizeof(areq), &areq);
		break;

	case AMP_CPL_GAA_RSP:
		/* got the remote AMP assoc - copy it and start the HCI
		 * Create Physical Link sequence */
		if (skb->len < sizeof(*arsp))
			goto cpl_finished;
		hdr = (void *) skb->data;
		arsp = (void *) skb_pull(skb, sizeof(*hdr));
		if (arsp->id != ctx->d.cpl.remote_id)
			goto cpl_finished;
		if (arsp->status != 0)
			goto cpl_finished;

		/* store away remote assoc */
		assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
		ctx->d.cpl.len_so_far = 0;
		/* NOTE(review): hdr->len used without le16_to_cpu() here,
		 * unlike a2mp_rsp()/a2mp_receive() - verify on big-endian */
		ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
		rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
		if (!rassoc)
			goto cpl_finished;
		memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
		ctx->d.cpl.remote_assoc = rassoc;
		skb_pull(skb, ctx->d.cpl.rem_len);

		/* set up CPL command */
		ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
		cp.phy_handle = ctx->d.cpl.phy_handle;
		if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
						&cp.key_len, &cp.type)) {
			result = -EPERM;
			goto cpl_finished;
		}

		/* advance context state */
		ctx->state = AMP_CPL_CPL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		break;

	case AMP_CPL_CPL_STATUS:
		/* received create physical link command status */
		if (cs->status != 0)
			goto cpl_finished;
		/* send the first assoc fragment */
		wcp.phy_handle = ctx->d.cpl.phy_handle;
		wcp.len_so_far = ctx->d.cpl.len_so_far;
		wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
		/* 248 = max assoc fragment per HCI command */
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
		ctx->state = AMP_CPL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = size of the fixed wcp fields before the fragment */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_CPL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		if (skb->len < sizeof(*wrp))
			goto cpl_finished;
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto cpl_finished;
		if (wrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;

		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		ctx->d.cpl.len_so_far += frag_len;
		ctx->d.cpl.rem_len -= frag_len;
		if (ctx->d.cpl.rem_len > 0) {
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.cpl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
			memcpy(wcp.frag,
				ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
				frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* now wait for channel selected event */
		ctx->state = AMP_CPL_CHANNEL_SELECT;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
		break;

	case AMP_CPL_CHANNEL_SELECT:
		/* received channel selection event */
		if (skb->len < sizeof(*cev))
			goto cpl_finished;
		cev = (void *) skb->data;
/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
	Create Physical Link collision scenario
		if (cev->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
*/

		/* request the first local assoc fragment */
		rcp.phy_handle = ctx->d.cpl.phy_handle;
		rcp.len_so_far = 0;
		rcp.max_len = ctx->d.cpl.max_len;
		lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
		if (!lassoc)
			goto cpl_finished;
		ctx->d.cpl.local_assoc = lassoc;
		ctx->d.cpl.len_so_far = 0;
		ctx->state = AMP_CPL_RLA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		break;

	case AMP_CPL_RLA_COMPLETE:
		/* received read local amp assoc command complete event */
		/* 4 = status + phy_handle + rem_len header of the reply */
		if (skb->len < 4)
			goto cpl_finished;
		rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
		if (rrp->status)
			goto cpl_finished;
		if (rrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
		rem_len = le16_to_cpu(rrp->rem_len);
		skb_pull(skb, 4);
		frag_len = skb->len;

		/* guard against the controller reporting more assoc data
		 * than the buffer we sized from max_assoc_size */
		if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
			goto cpl_finished;

		/* save this fragment in context */
		lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
		memcpy(lassoc, rrp->frag, frag_len);
		ctx->d.cpl.len_so_far += frag_len;
		rem_len -= frag_len;
		if (rem_len > 0) {
			/* request another local assoc fragment */
			rcp.phy_handle = ctx->d.cpl.phy_handle;
			rcp.len_so_far = ctx->d.cpl.len_so_far;
			rcp.max_len = ctx->d.cpl.max_len;
			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		} else {
			creq.local_id = ctx->id;
			creq.remote_id = ctx->d.cpl.remote_id;
			/* wait for A2MP rsp AND phys link complete event */
			ctx->state = AMP_CPL_PL_COMPLETE;
			ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
			ctx->rsp_ident = next_ident(ctx->mgr);
			ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
			send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
				A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
				ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
		}
		break;

	case AMP_CPL_PL_COMPLETE:
		/* two independent completions expected; each clears its bit
		 * in ctx->evt_type, and we finish when both have arrived */
		if (evt_type == AMP_A2MP_RSP) {
			/* create physical link response received */
			ctx->evt_type &= ~AMP_A2MP_RSP;
			if (skb->len < sizeof(*crsp))
				goto cpl_finished;
			crsp = (void *) skb_pull(skb, sizeof(*hdr));
			if ((crsp->local_id != ctx->d.cpl.remote_id) ||
				(crsp->remote_id != ctx->id) ||
				(crsp->status != 0)) {
				/* 0x13 - presumably "remote user terminated";
				 * verify against HCI error code table */
				cancel_cpl_ctx(ctx, 0x13);
				break;
			}

			/* notify Qualcomm PAL */
			if (ctx->hdev->manufacturer == 0x001d)
				hci_send_cmd(ctx->hdev,
					hci_opcode_pack(0x3f, 0x00), 0, NULL);
		}
		if (evt_type == AMP_HCI_EVENT) {
			ctx->evt_type &= ~AMP_HCI_EVENT;
			/* physical link complete event received */
			if (skb->len < sizeof(*pev))
				goto cpl_finished;
			pev = (void *) skb->data;
			if (pev->phy_handle != ctx->d.cpl.phy_handle)
				break;
			if (pev->status != 0)
				goto cpl_finished;
		}
		/* still waiting for the other completion */
		if (ctx->evt_type)
			break;
		conn = hci_conn_hash_lookup_handle(ctx->hdev,
							ctx->d.cpl.phy_handle);
		if (!conn)
			goto cpl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		conn->dst_id = ctx->d.cpl.remote_id;
		conn->out = 1;
		goto cpl_finished;
		break;

	case AMP_CPL_PL_CANCEL:
		/* disconnect completed after a cancel request */
		dev = (void *) skb->data;
		BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
		result = -EISCONN;
		goto cpl_finished;
		break;

	default:
		goto cpl_finished;
		break;
	}
	return 0;

cpl_finished:
	/* report the outcome to L2CAP and release everything this context
	 * holds: socket ref, hdev ref, and the copied remote/local assocs */
	l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
					ctx->sk);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	kfree(ctx->d.cpl.remote_assoc);
	kfree(ctx->d.cpl.local_assoc);
	return 1;
}
1409
1410static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
1411{
1412 struct a2mp_cmd_hdr *hdr = (void *) skb->data;
1413 struct a2mp_disconnphyslink_req *req;
1414 struct a2mp_disconnphyslink_rsp rsp;
1415 struct hci_dev *hdev;
1416 struct hci_conn *conn;
1417 struct amp_ctx *aplctx;
1418
1419 BT_DBG("mgr %p skb %p", mgr, skb);
1420 if (hdr->len < sizeof(*req))
1421 return -EINVAL;
1422 req = (void *) skb_pull(skb, sizeof(*hdr));
1423 skb_pull(skb, sizeof(*req));
1424
1425 rsp.local_id = req->remote_id;
1426 rsp.remote_id = req->local_id;
1427 rsp.status = 0;
1428 BT_DBG("local_id %d remote_id %d",
1429 (int) rsp.local_id, (int) rsp.remote_id);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08001430 hdev = hci_dev_get(rsp.local_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 if (!hdev) {
1432 rsp.status = 1; /* Invalid Controller ID */
1433 goto dpl_finished;
1434 }
1435 BT_DBG("hdev %p", hdev);
1436 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1437 &mgr->l2cap_conn->hcon->dst);
1438 if (!conn) {
1439 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
1440 if (aplctx) {
1441 kill_ctx(aplctx);
1442 rsp.status = 0;
1443 goto dpl_finished;
1444 }
1445 rsp.status = 2; /* No Physical Link exists */
1446 goto dpl_finished;
1447 }
1448 BT_DBG("conn %p", conn);
1449 hci_disconnect(conn, 0x13);
1450
1451dpl_finished:
1452 send_a2mp_cmd(mgr, hdr->ident,
1453 A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
1454 if (hdev)
1455 hci_dev_put(hdev);
1456 return 0;
1457}
1458
1459static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
1460{
1461 struct amp_mgr *mgr = ctx->mgr;
1462 u8 finished = 0;
1463
1464 if (!mgr->connected)
1465 return 0;
1466
1467 switch (ctx->type) {
1468 case AMP_GETAMPASSOC:
1469 finished = getampassoc_handler(ctx, evt_type, data);
1470 break;
1471 case AMP_CREATEPHYSLINK:
1472 finished = createphyslink_handler(ctx, evt_type, data);
1473 break;
1474 case AMP_ACCEPTPHYSLINK:
1475 finished = acceptphyslink_handler(ctx, evt_type, data);
1476 break;
1477 }
1478
1479 if (!finished)
1480 mod_timer(&(ctx->timer), jiffies +
1481 msecs_to_jiffies(A2MP_RSP_TIMEOUT));
1482 else
1483 destroy_ctx(ctx);
1484 return finished;
1485}
1486
1487static int cancel_ctx(struct amp_ctx *ctx)
1488{
1489 return execute_ctx(ctx, AMP_CANCEL, 0);
1490}
1491
1492static int kill_ctx(struct amp_ctx *ctx)
1493{
1494 return execute_ctx(ctx, AMP_KILLED, 0);
1495}
1496
1497static void ctx_timeout_worker(struct work_struct *w)
1498{
1499 struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
1500 struct amp_ctx *ctx = work->ctx;
1501 kill_ctx(ctx);
1502 kfree(work);
1503}
1504
1505static void ctx_timeout(unsigned long data)
1506{
1507 struct amp_ctx *ctx = (struct amp_ctx *) data;
1508 struct amp_work_ctx_timeout *work;
1509
1510 BT_DBG("ctx %p", ctx);
1511 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1512 if (work) {
1513 INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
1514 work->ctx = ctx;
1515 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1516 kfree(work);
1517 }
1518}
1519
1520static void launch_ctx(struct amp_mgr *mgr)
1521{
1522 struct amp_ctx *ctx = NULL;
1523
1524 BT_DBG("mgr %p", mgr);
Peter Krystadf5289202011-11-14 15:11:22 -08001525 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001526 if (!list_empty(&mgr->ctx_list))
1527 ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
Peter Krystadf5289202011-11-14 15:11:22 -08001528 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001529 BT_DBG("ctx %p", ctx);
1530 if (ctx)
1531 execute_ctx(ctx, AMP_INIT, NULL);
1532}
1533
1534static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
1535{
1536 struct amp_ctx *ctx;
1537 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1538 u16 hdr_len = le16_to_cpu(hdr->len);
1539
1540 /* find context waiting for A2MP rsp with this rsp's identifier */
1541 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
1542 ctx = get_ctx_a2mp(mgr, hdr->ident);
1543 if (ctx) {
1544 execute_ctx(ctx, AMP_A2MP_RSP, skb);
1545 } else {
1546 BT_DBG("context not found");
1547 skb_pull(skb, sizeof(*hdr));
1548 if (hdr_len > skb->len)
1549 hdr_len = skb->len;
1550 skb_pull(skb, hdr_len);
1551 }
1552 return 0;
1553}
1554
1555/* L2CAP-A2MP interface */
1556
Peter Krystadf5289202011-11-14 15:11:22 -08001557static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558{
1559 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1560 int len;
1561 int err = 0;
1562 struct amp_mgr *mgr;
1563
1564 mgr = get_amp_mgr_sk(sk);
1565 if (!mgr)
1566 goto a2mp_finished;
1567
1568 len = skb->len;
1569 while (len >= sizeof(*hdr)) {
1570 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1571 u16 clen = le16_to_cpu(hdr->len);
1572
1573 BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
1574 if (clen > len || !hdr->ident) {
1575 err = -EINVAL;
1576 break;
1577 }
1578 switch (hdr->code) {
1579 case A2MP_COMMAND_REJ:
1580 command_rej(mgr, skb);
1581 break;
1582 case A2MP_DISCOVER_REQ:
1583 err = discover_req(mgr, skb);
1584 break;
1585 case A2MP_CHANGE_NOTIFY:
1586 err = change_notify(mgr, skb);
1587 break;
1588 case A2MP_GETINFO_REQ:
1589 err = getinfo_req(mgr, skb);
1590 break;
1591 case A2MP_GETAMPASSOC_REQ:
1592 err = getampassoc_req(mgr, skb);
1593 break;
1594 case A2MP_CREATEPHYSLINK_REQ:
1595 err = createphyslink_req(mgr, skb);
1596 break;
1597 case A2MP_DISCONNPHYSLINK_REQ:
1598 err = disconnphyslink_req(mgr, skb);
1599 break;
1600 case A2MP_CHANGE_RSP:
1601 case A2MP_DISCOVER_RSP:
1602 case A2MP_GETINFO_RSP:
1603 case A2MP_GETAMPASSOC_RSP:
1604 case A2MP_CREATEPHYSLINK_RSP:
1605 case A2MP_DISCONNPHYSLINK_RSP:
1606 err = a2mp_rsp(mgr, skb);
1607 break;
1608 default:
1609 BT_ERR("Unknown A2MP signaling command 0x%2.2x",
1610 hdr->code);
1611 skb_pull(skb, sizeof(*hdr));
1612 err = -EINVAL;
1613 break;
1614 }
1615 len = skb->len;
1616 }
1617
1618a2mp_finished:
1619 if (err && mgr) {
1620 struct a2mp_cmd_rej rej;
1621 rej.reason = cpu_to_le16(0);
1622 send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
1623 sizeof(rej), &rej);
1624 }
1625}
1626
1627/* L2CAP-A2MP interface */
1628
1629static int send_a2mp(struct socket *sock, u8 *data, int len)
1630{
1631 struct kvec iv = { data, len };
1632 struct msghdr msg;
1633
1634 memset(&msg, 0, sizeof(msg));
1635
1636 return kernel_sendmsg(sock, &msg, &iv, 1, len);
1637}
1638
1639static void data_ready_worker(struct work_struct *w)
1640{
1641 struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
1642 struct sock *sk = work->sk;
1643 struct sk_buff *skb;
1644
1645 /* skb_dequeue() is thread-safe */
1646 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1647 a2mp_receive(sk, skb);
1648 kfree_skb(skb);
1649 }
1650 sock_put(work->sk);
1651 kfree(work);
1652}
1653
1654static void data_ready(struct sock *sk, int bytes)
1655{
1656 struct amp_work_data_ready *work;
1657 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1658 if (work) {
1659 INIT_WORK((struct work_struct *) work, data_ready_worker);
1660 sock_hold(sk);
1661 work->sk = sk;
1662 work->bytes = bytes;
1663 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1664 kfree(work);
1665 sock_put(sk);
1666 }
1667 }
1668}
1669
/* Workqueue body reacting to A2MP socket state transitions.
 * BT_CONNECTED: mark the manager connected, replay any frame that arrived
 * before the channel was up, and start the first queued context.
 * BT_CLOSED: release the socket (unless the sock is already dead) and tear
 * down the manager.  Drops the socket ref taken in state_change().
 */
static void state_change_worker(struct work_struct *w)
{
	struct amp_work_state_change *work = (struct amp_work_state_change *) w;
	struct amp_mgr *mgr;
	switch (work->sk->sk_state) {
	case BT_CONNECTED:
		/* socket is up */
		BT_DBG("CONNECTED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			mgr->connected = 1;
			/* deliver a frame that was deferred while the
			 * channel was still connecting */
			if (mgr->skb) {
				l2cap_recv_deferred_frame(work->sk, mgr->skb);
				mgr->skb = NULL;
			}
			launch_ctx(mgr);
		}
		break;

	case BT_CLOSED:
		/* connection is gone */
		BT_DBG("CLOSED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			/* if the sock is already SOCK_DEAD, releasing the
			 * socket here would be unsafe; just forget it */
			if (!sock_flag(work->sk, SOCK_DEAD))
				sock_release(mgr->a2mp_sock);
			mgr->a2mp_sock = NULL;
			remove_amp_mgr(mgr);
		}
		break;

	default:
		/* something else happened */
		break;
	}
	sock_put(work->sk);
	kfree(work);
}
1708
1709static void state_change(struct sock *sk)
1710{
1711 struct amp_work_state_change *work;
1712 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1713 if (work) {
1714 INIT_WORK((struct work_struct *) work, state_change_worker);
1715 sock_hold(sk);
1716 work->sk = sk;
1717 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1718 kfree(work);
1719 sock_put(sk);
1720 }
1721 }
1722}
1723
1724static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
1725{
1726 int err;
1727 struct socket *sock;
1728 struct sockaddr_l2 addr;
1729 struct sock *sk;
1730 struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
1731 L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
1732 L2CAP_MODE_ERTM, 1, 0xFF, 1};
1733
1734
1735 err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
1736 BTPROTO_L2CAP, &sock);
1737
1738 if (err) {
1739 BT_ERR("sock_create_kern failed %d", err);
1740 return NULL;
1741 }
1742
1743 sk = sock->sk;
1744 sk->sk_data_ready = data_ready;
1745 sk->sk_state_change = state_change;
1746
1747 memset(&addr, 0, sizeof(addr));
1748 bacpy(&addr.l2_bdaddr, src);
1749 addr.l2_family = AF_BLUETOOTH;
1750 addr.l2_cid = L2CAP_CID_A2MP;
1751 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
1752 if (err) {
1753 BT_ERR("kernel_bind failed %d", err);
1754 sock_release(sock);
1755 return NULL;
1756 }
1757
1758 l2cap_fixed_channel_config(sk, &opts);
1759
1760 memset(&addr, 0, sizeof(addr));
1761 bacpy(&addr.l2_bdaddr, dst);
1762 addr.l2_family = AF_BLUETOOTH;
1763 addr.l2_cid = L2CAP_CID_A2MP;
1764 err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
1765 O_NONBLOCK);
1766 if ((err == 0) || (err == -EINPROGRESS))
1767 return sock;
1768 else {
1769 BT_ERR("kernel_connect failed %d", err);
1770 sock_release(sock);
1771 return NULL;
1772 }
1773}
1774
1775static void conn_ind_worker(struct work_struct *w)
1776{
1777 struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
1778 struct l2cap_conn *conn = work->conn;
1779 struct sk_buff *skb = work->skb;
1780 struct amp_mgr *mgr;
1781
1782 mgr = get_create_amp_mgr(conn, skb);
1783 BT_DBG("mgr %p", mgr);
1784 kfree(work);
1785}
1786
1787static void create_physical_worker(struct work_struct *w)
1788{
1789 struct amp_work_create_physical *work =
1790 (struct amp_work_create_physical *) w;
1791
1792 create_physical(work->conn, work->sk);
1793 sock_put(work->sk);
1794 kfree(work);
1795}
1796
1797static void accept_physical_worker(struct work_struct *w)
1798{
1799 struct amp_work_accept_physical *work =
1800 (struct amp_work_accept_physical *) w;
1801
1802 accept_physical(work->conn, work->id, work->sk);
1803 sock_put(work->sk);
1804 kfree(work);
1805}
1806
1807/* L2CAP Fixed Channel interface */
1808
1809void amp_conn_ind(struct l2cap_conn *conn, struct sk_buff *skb)
1810{
1811 struct amp_work_conn_ind *work;
1812 BT_DBG("conn %p, skb %p", conn, skb);
1813 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1814 if (work) {
1815 INIT_WORK((struct work_struct *) work, conn_ind_worker);
1816 work->conn = conn;
1817 work->skb = skb;
1818 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1819 kfree(work);
1820 }
1821}
1822
1823/* L2CAP Physical Link interface */
1824
1825void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
1826{
1827 struct amp_work_create_physical *work;
1828 BT_DBG("conn %p", conn);
1829 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1830 if (work) {
1831 INIT_WORK((struct work_struct *) work, create_physical_worker);
1832 work->conn = conn;
1833 work->sk = sk;
1834 sock_hold(sk);
1835 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1836 sock_put(sk);
1837 kfree(work);
1838 }
1839 }
1840}
1841
1842void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
1843{
1844 struct amp_work_accept_physical *work;
1845 BT_DBG("conn %p", conn);
1846
1847 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1848 if (work) {
1849 INIT_WORK((struct work_struct *) work, accept_physical_worker);
1850 work->conn = conn;
1851 work->sk = sk;
1852 work->id = id;
1853 sock_hold(sk);
1854 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1855 sock_put(sk);
1856 kfree(work);
1857 }
1858 }
1859}
1860
1861/* HCI interface */
1862
1863static void amp_cmd_cmplt_worker(struct work_struct *w)
1864{
1865 struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
1866 struct hci_dev *hdev = work->hdev;
1867 u16 opcode = work->opcode;
1868 struct sk_buff *skb = work->skb;
1869 struct amp_ctx *ctx;
1870
1871 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
1872 if (ctx)
1873 execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
1874 kfree_skb(skb);
1875 kfree(w);
1876}
1877
1878static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
1879 struct sk_buff *skb)
1880{
1881 struct amp_work_cmd_cmplt *work;
1882 struct sk_buff *skbc;
1883 BT_DBG("hdev %p opcode 0x%x skb %p len %d",
1884 hdev, opcode, skb, skb->len);
1885 skbc = skb_clone(skb, GFP_ATOMIC);
1886 if (!skbc)
1887 return;
1888 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1889 if (work) {
1890 INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
1891 work->hdev = hdev;
1892 work->opcode = opcode;
1893 work->skb = skbc;
1894 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1895 kfree(work);
1896 }
1897}
1898
1899static void amp_cmd_status_worker(struct work_struct *w)
1900{
1901 struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
1902 struct hci_dev *hdev = work->hdev;
1903 u16 opcode = work->opcode;
1904 u8 status = work->status;
1905 struct amp_ctx *ctx;
1906
1907 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
1908 if (ctx)
1909 execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
1910 kfree(w);
1911}
1912
1913static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
1914{
1915 struct amp_work_cmd_status *work;
1916 BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
1917 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1918 if (work) {
1919 INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
1920 work->hdev = hdev;
1921 work->opcode = opcode;
1922 work->status = status;
1923 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1924 kfree(work);
1925 }
1926}
1927
1928static void amp_event_worker(struct work_struct *w)
1929{
1930 struct amp_work_event *work = (struct amp_work_event *) w;
1931 struct hci_dev *hdev = work->hdev;
1932 u8 event = work->event;
1933 struct sk_buff *skb = work->skb;
1934 struct amp_ctx *ctx;
1935
1936 if (event == HCI_EV_AMP_STATUS_CHANGE) {
1937 struct hci_ev_amp_status_change *ev;
1938 if (skb->len < sizeof(*ev))
1939 goto amp_event_finished;
1940 ev = (void *) skb->data;
1941 if (ev->status != 0)
1942 goto amp_event_finished;
1943 if (ev->amp_status == hdev->amp_status)
1944 goto amp_event_finished;
1945 hdev->amp_status = ev->amp_status;
1946 send_a2mp_change_notify();
1947 goto amp_event_finished;
1948 }
1949 ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
1950 if (ctx)
1951 execute_ctx(ctx, AMP_HCI_EVENT, skb);
1952
1953amp_event_finished:
1954 kfree_skb(skb);
1955 kfree(w);
1956}
1957
1958static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
1959{
1960 struct amp_work_event *work;
1961 struct sk_buff *skbc;
1962 BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
1963 skbc = skb_clone(skb, GFP_ATOMIC);
1964 if (!skbc)
1965 return;
1966 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1967 if (work) {
1968 INIT_WORK((struct work_struct *) work, amp_event_worker);
1969 work->hdev = hdev;
1970 work->event = event;
1971 work->skb = skbc;
1972 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1973 kfree(work);
1974 }
1975}
1976
/* Workqueue body for an hdev register/unregister/up/down notification:
 * broadcast an A2MP change notification to connected peers. */
static void amp_dev_event_worker(struct work_struct *w)
{
	send_a2mp_change_notify();
	kfree(w);
}
1982
1983static int amp_dev_event(struct notifier_block *this, unsigned long event,
1984 void *ptr)
1985{
1986 struct hci_dev *hdev = (struct hci_dev *) ptr;
1987 struct amp_work_event *work;
1988
1989 if (hdev->amp_type == HCI_BREDR)
1990 return NOTIFY_DONE;
1991
1992 switch (event) {
1993 case HCI_DEV_UNREG:
1994 case HCI_DEV_REG:
1995 case HCI_DEV_UP:
1996 case HCI_DEV_DOWN:
1997 BT_DBG("hdev %p event %ld", hdev, event);
1998 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1999 if (work) {
2000 INIT_WORK((struct work_struct *) work,
2001 amp_dev_event_worker);
2002 if (queue_work(amp_workqueue,
2003 (struct work_struct *) work) == 0)
2004 kfree(work);
2005 }
2006 }
2007 return NOTIFY_DONE;
2008}
2009
2010
2011/* L2CAP module init continued */
2012
/* Notifier hooked into HCI device register/unregister/up/down events. */
static struct notifier_block amp_notifier = {
	.notifier_call = amp_dev_event
};
2016
/* Callbacks registered with the HCI core so AMP command completions,
 * command statuses, and events reach this module. */
static struct amp_mgr_cb hci_amp = {
	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
	.amp_cmd_status_event = amp_cmd_status_evt,
	.amp_event = amp_evt
};
2022
2023int amp_init(void)
2024{
2025 hci_register_amp(&hci_amp);
2026 hci_register_notifier(&amp_notifier);
2027 amp_next_handle = 1;
2028 amp_workqueue = create_singlethread_workqueue("a2mp");
2029 if (!amp_workqueue)
2030 return -EPERM;
2031 return 0;
2032}
2033
/* Module exit: unhook from HCI first so no new work is queued, then drain
 * and destroy the workqueue. */
void amp_exit(void)
{
	hci_unregister_amp(&hci_amp);
	hci_unregister_notifier(&amp_notifier);
	flush_workqueue(amp_workqueue);
	destroy_workqueue(amp_workqueue);
}