blob: f4ceeea129cf3edcfb7d768f286b3d8f741b46e0 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
Peter Krystad072a51f2012-03-30 12:59:33 -07002 Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/kernel.h>
18
19#include <linux/skbuff.h>
20#include <linux/list.h>
21#include <linux/workqueue.h>
22#include <linux/timer.h>
23
24#include <linux/crypto.h>
25#include <linux/scatterlist.h>
26#include <linux/err.h>
27#include <crypto/hash.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/amp.h>
33
34static struct workqueue_struct *amp_workqueue;
35
36LIST_HEAD(amp_mgr_list);
37DEFINE_RWLOCK(amp_mgr_list_lock);
38
39static int send_a2mp(struct socket *sock, u8 *data, int len);
40
41static void ctx_timeout(unsigned long data);
42
43static void launch_ctx(struct amp_mgr *mgr);
44static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
45static int kill_ctx(struct amp_ctx *ctx);
46static int cancel_ctx(struct amp_ctx *ctx);
47
48static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
49
/* Unregister @mgr, kill all of its outstanding contexts and free it. */
static void remove_amp_mgr(struct amp_mgr *mgr)
{
	BT_DBG("mgr %p", mgr);

	/* Unlink first so no new lookup can hand out this manager. */
	write_lock(&amp_mgr_list_lock);
	list_del(&mgr->list);
	write_unlock(&amp_mgr_list_lock);

	/* Kill every pending context.  The read lock is dropped around
	 * kill_ctx() because the dying context removes itself from
	 * ctx_list under the write lock (see destroy_ctx()).
	 * NOTE(review): the list may change while the lock is dropped —
	 * presumably callers have quiesced the manager first; confirm.
	 */
	read_lock(&mgr->ctx_list_lock);
	while (!list_empty(&mgr->ctx_list)) {
		struct amp_ctx *ctx;
		ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
		read_unlock(&mgr->ctx_list_lock);
		BT_DBG("kill ctx %p", ctx);
		kill_ctx(ctx);
		read_lock(&mgr->ctx_list_lock);
	}
	read_unlock(&mgr->ctx_list_lock);

	kfree(mgr->ctrls);

	kfree(mgr);
}
73
74static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
75{
76 struct amp_mgr *mgr;
77 struct amp_mgr *found = NULL;
78
Peter Krystadf5289202011-11-14 15:11:22 -080079 read_lock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080 list_for_each_entry(mgr, &amp_mgr_list, list) {
81 if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
82 found = mgr;
83 break;
84 }
85 }
Peter Krystadf5289202011-11-14 15:11:22 -080086 read_unlock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087 return found;
88}
89
/* Look up the AMP manager bound to @hcon's L2CAP connection, creating
 * and registering one (with its A2MP fixed-channel socket) if none
 * exists.  Returns NULL on allocation or socket failure.
 * NOTE(review): the list lock is dropped between the failed lookup and
 * the final list_add(), so two concurrent callers could each create a
 * manager for the same connection — confirm callers serialize this.
 */
static struct amp_mgr *get_create_amp_mgr(struct hci_conn *hcon,
						struct sk_buff *skb)
{
	struct amp_mgr *mgr;

	write_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->l2cap_conn == hcon->l2cap_data) {
			BT_DBG("found %p", mgr);
			write_unlock(&amp_mgr_list_lock);
			goto gc_finished;
		}
	}
	write_unlock(&amp_mgr_list_lock);

	mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
	if (!mgr)
		return NULL;

	mgr->l2cap_conn = hcon->l2cap_data;
	mgr->next_ident = 1;	/* A2MP idents start at 1; 0 is never used */
	INIT_LIST_HEAD(&mgr->ctx_list);
	rwlock_init(&mgr->ctx_list_lock);
	mgr->skb = skb;
	BT_DBG("hcon %p mgr %p", hcon, mgr);
	mgr->a2mp_sock = open_fixed_channel(&hcon->hdev->bdaddr, &hcon->dst);
	if (!mgr->a2mp_sock) {
		kfree(mgr);
		return NULL;
	}
	write_lock(&amp_mgr_list_lock);
	list_add(&(mgr->list), &amp_mgr_list);
	write_unlock(&amp_mgr_list_lock);

gc_finished:
	return mgr;
}
127
128static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
129{
130 if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
131 return mgr->ctrls;
132 else
133 return NULL;
134}
135
136static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
137{
138 struct amp_ctrl *ctrl;
139
140 BT_DBG("mgr %p, id %d", mgr, id);
141 if ((mgr->ctrls) && (mgr->ctrls->id == id))
142 ctrl = mgr->ctrls;
143 else {
144 kfree(mgr->ctrls);
145 ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
146 if (ctrl) {
147 ctrl->mgr = mgr;
148 ctrl->id = id;
149 }
150 mgr->ctrls = ctrl;
151 }
152
153 return ctrl;
154}
155
156static struct amp_ctx *create_ctx(u8 type, u8 state)
157{
158 struct amp_ctx *ctx = NULL;
159
160 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
161 if (ctx) {
162 ctx->type = type;
163 ctx->state = state;
164 init_timer(&(ctx->timer));
165 ctx->timer.function = ctx_timeout;
166 ctx->timer.data = (unsigned long) ctx;
167 }
168 BT_DBG("ctx %p, type %d", ctx, type);
169 return ctx;
170}
171
/* Attach @ctx to @mgr and kick off its state machine with AMP_INIT. */
static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
{
	BT_DBG("ctx %p", ctx);
	write_lock(&mgr->ctx_list_lock);
	list_add(&ctx->list, &mgr->ctx_list);
	write_unlock(&mgr->ctx_list_lock);
	ctx->mgr = mgr;
	execute_ctx(ctx, AMP_INIT, 0);
}
181
/* Tear down a finished context: stop its timer, unlink it from its
 * manager and free it.  If another context was deferred behind this one
 * (collision handling), start that one now.
 */
static void destroy_ctx(struct amp_ctx *ctx)
{
	struct amp_mgr *mgr = ctx->mgr;

	BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
	del_timer(&ctx->timer);
	write_lock(&mgr->ctx_list_lock);
	list_del(&ctx->list);
	write_unlock(&mgr->ctx_list_lock);
	if (ctx->deferred)
		execute_ctx(ctx->deferred, AMP_INIT, 0);
	kfree(ctx);
}
195
196static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
197{
198 struct amp_ctx *fnd = NULL;
199 struct amp_ctx *ctx;
200
Peter Krystadf5289202011-11-14 15:11:22 -0800201 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 list_for_each_entry(ctx, &mgr->ctx_list, list) {
203 if (ctx->type == type) {
204 fnd = ctx;
205 break;
206 }
207 }
Peter Krystadf5289202011-11-14 15:11:22 -0800208 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209 return fnd;
210}
211
212static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
213{
214 struct amp_mgr *mgr = cur->mgr;
215 struct amp_ctx *fnd = NULL;
216 struct amp_ctx *ctx;
217
Peter Krystadf5289202011-11-14 15:11:22 -0800218 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219 list_for_each_entry(ctx, &mgr->ctx_list, list) {
220 if ((ctx->type == type) && (ctx != cur)) {
221 fnd = ctx;
222 break;
223 }
224 }
Peter Krystadf5289202011-11-14 15:11:22 -0800225 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 return fnd;
227}
228
229static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
230{
231 struct amp_ctx *fnd = NULL;
232 struct amp_ctx *ctx;
233
Peter Krystadf5289202011-11-14 15:11:22 -0800234 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235 list_for_each_entry(ctx, &mgr->ctx_list, list) {
236 if ((ctx->evt_type & AMP_A2MP_RSP) &&
237 (ctx->rsp_ident == ident)) {
238 fnd = ctx;
239 break;
240 }
241 }
Peter Krystadf5289202011-11-14 15:11:22 -0800242 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700243 return fnd;
244}
245
/* Find, across all managers, a context bound to @hdev that is waiting
 * for this HCI event: @evt_value is matched against the opcode for
 * command status/complete, or against the event code for plain events.
 * Returns the context or NULL.
 */
static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
					u16 evt_value)
{
	struct amp_mgr *mgr;
	struct amp_ctx *fnd = NULL;

	read_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		struct amp_ctx *ctx;
		read_lock(&mgr->ctx_list_lock);
		list_for_each_entry(ctx, &mgr->ctx_list, list) {
			struct hci_dev *ctx_hdev;
			/* Resolve each context's controller by id and
			 * drop the temporary reference after comparing. */
			ctx_hdev = hci_dev_get(ctx->id);
			if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
				switch (evt_type) {
				case AMP_HCI_CMD_STATUS:
				case AMP_HCI_CMD_CMPLT:
					if (ctx->opcode == evt_value)
						fnd = ctx;
					break;
				case AMP_HCI_EVENT:
					/* only the low byte carries the
					 * HCI event code */
					if (ctx->evt_code == (u8) evt_value)
						fnd = ctx;
					break;
				}
			}
			if (ctx_hdev)
				hci_dev_put(ctx_hdev);

			if (fnd)
				break;
		}
		read_unlock(&mgr->ctx_list_lock);
	}
	read_unlock(&amp_mgr_list_lock);
	return fnd;
}
283
284static inline u8 next_ident(struct amp_mgr *mgr)
285{
286 if (++mgr->next_ident == 0)
287 mgr->next_ident = 1;
288 return mgr->next_ident;
289}
290
291static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
292 u16 len, void *data, u16 len2, void *data2)
293{
294 struct a2mp_cmd_hdr *hdr;
295 int plen;
296 u8 *p, *cmd;
297
298 BT_DBG("ident %d code 0x%02x", ident, code);
299 if (!mgr->a2mp_sock)
300 return;
301 plen = sizeof(*hdr) + len + len2;
302 cmd = kzalloc(plen, GFP_ATOMIC);
303 if (!cmd)
304 return;
305 hdr = (struct a2mp_cmd_hdr *) cmd;
306 hdr->code = code;
307 hdr->ident = ident;
308 hdr->len = cpu_to_le16(len+len2);
309 p = cmd + sizeof(*hdr);
310 memcpy(p, data, len);
311 p += len;
312 memcpy(p, data2, len2);
313 send_a2mp(mgr->a2mp_sock, cmd, plen);
314 kfree(cmd);
315}
316
/* Convenience wrapper for send_a2mp_cmd2() with a single payload. */
static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
				u8 code, u16 len, void *data)
{
	send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
}
322
323static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
324{
325 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
326 struct a2mp_cmd_rej *rej;
327 struct amp_ctx *ctx;
328
329 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
330 rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
331 if (skb->len < sizeof(*rej))
332 return -EINVAL;
333 BT_DBG("reason %d", le16_to_cpu(rej->reason));
334 ctx = get_ctx_a2mp(mgr, hdr->ident);
335 if (ctx)
336 kill_ctx(ctx);
337 skb_pull(skb, sizeof(*rej));
338 return 0;
339}
340
341static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
342 void *msg)
343{
344 struct a2mp_cl clist[16];
345 struct a2mp_cl *cl;
346 struct hci_dev *hdev;
347 int num_ctrls = 1, id;
348
349 cl = clist;
350 cl->id = 0;
351 cl->type = 0;
352 cl->status = 1;
353
354 for (id = 0; id < 16; ++id) {
355 hdev = hci_dev_get(id);
356 if (hdev) {
357 if ((hdev->amp_type != HCI_BREDR) &&
358 test_bit(HCI_UP, &hdev->flags)) {
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800359 (cl + num_ctrls)->id = hdev->id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360 (cl + num_ctrls)->type = hdev->amp_type;
361 (cl + num_ctrls)->status = hdev->amp_status;
362 ++num_ctrls;
363 }
364 hci_dev_put(hdev);
365 }
366 }
367 send_a2mp_cmd2(mgr, ident, code, len, msg,
368 num_ctrls*sizeof(*cl), clist);
369
370 return 0;
371}
372
/* Broadcast an A2MP Change Notify to every manager whose peer has
 * already performed discovery.
 * NOTE(review): amp_mgr_list is walked here WITHOUT amp_mgr_list_lock,
 * unlike every other traversal in this file — presumably because the
 * send path may sleep; confirm callers cannot race with manager
 * removal.
 */
static void send_a2mp_change_notify(void)
{
	struct amp_mgr *mgr;

	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->discovered)
			send_a2mp_cl(mgr, next_ident(mgr),
					A2MP_CHANGE_NOTIFY, 0, NULL);
	}
}
383
/* Handle an A2MP Discover Request: consume any extended-feature words,
 * remember that the peer has discovered us, and answer with our MTU and
 * controller list.  Returns 0 or -EINVAL on a short packet.
 */
static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_discover_req *req;
	u16 *efm;
	struct a2mp_discover_rsp rsp;

	req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*req))
		return -EINVAL;
	efm = (u16 *) skb_pull(skb, sizeof(*req));

	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
		le16_to_cpu(req->ext_feat));

	/* Bit 15 of ext_feat set means another 16-bit extension word
	 * follows; skip them all. */
	while (le16_to_cpu(req->ext_feat) & 0x8000) {
		if (skb->len < sizeof(*efm))
			return -EINVAL;
		req->ext_feat = *efm;
		BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
		efm = (u16 *) skb_pull(skb, sizeof(*efm));
	}

	rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp.ext_feat = 0;

	mgr->discovered = 1;

	return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
				sizeof(rsp), &rsp);
}
415
/* Handle an A2MP Change Notify from the peer: refresh the cached view
 * of the peer's controllers and acknowledge.  Always returns 0.
 */
static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cl *cl;

	cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
	while (skb->len >= sizeof(*cl)) {
		struct amp_ctrl *ctrl;
		/* id 0 is the peer's BR/EDR radio — not cached here. */
		if (cl->id != 0) {
			ctrl = get_create_ctrl(mgr, cl->id);
			if (ctrl != NULL) {
				ctrl->type = cl->type;
				ctrl->status = cl->status;
			}
		}
		cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
	}

	/* TODO find controllers in manager that were not on received */
	/* controller list and destroy them */
	send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);

	return 0;
}
440
/* Handle an A2MP Get Info Request: report bandwidth/latency/capability
 * data for the requested controller id, or status 1 if the controller
 * is unknown or BR/EDR-only.  Returns 0 or -EINVAL on a short packet.
 */
static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	u8 *data;
	int id;
	struct hci_dev *hdev;
	struct a2mp_getinfo_rsp rsp;

	data = (u8 *) skb_pull(skb, sizeof(*hdr));
	if (le16_to_cpu(hdr->len) < sizeof(*data))
		return -EINVAL;
	if (skb->len < sizeof(*data))
		return -EINVAL;
	id = *data;
	skb_pull(skb, sizeof(*data));
	rsp.id = id;
	rsp.status = 1;	/* assume failure until the device checks out */

	BT_DBG("id %d", id);
	hdev = hci_dev_get(id);

	if (hdev && hdev->amp_type != HCI_BREDR) {
		rsp.status = 0;
		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
	}

	send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);

	if (hdev)
		hci_dev_put(hdev);

	return 0;
}
478
/* Initiator entry point from L2CAP: start the Create Physical Link
 * sequence for @conn.  On any setup failure the completion is reported
 * immediately with -ENOMEM.
 */
static void create_physical(struct l2cap_conn *conn, struct sock *sk)
{
	struct amp_mgr *mgr;
	struct amp_ctx *ctx = NULL;

	BT_DBG("conn %p", conn);
	mgr = get_create_amp_mgr(conn->hcon, NULL);
	if (!mgr)
		goto cp_finished;
	BT_DBG("mgr %p", mgr);
	ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
	if (!ctx)
		goto cp_finished;
	ctx->sk = sk;
	sock_hold(sk);	/* released when the context finishes */
	start_ctx(mgr, ctx);
	return;

cp_finished:
	l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
}
500
501static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
502{
503 struct amp_mgr *mgr;
504 struct hci_dev *hdev;
505 struct hci_conn *conn;
506 struct amp_ctx *aplctx = NULL;
507 u8 remote_id = 0;
508 int result = -EINVAL;
509
510 BT_DBG("lcon %p", lcon);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800511 hdev = hci_dev_get(id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700512 if (!hdev)
513 goto ap_finished;
514 BT_DBG("hdev %p", hdev);
Peter Krystad072a51f2012-03-30 12:59:33 -0700515 mgr = get_create_amp_mgr(lcon->hcon, NULL);
Peter Krystadf7dcc792011-11-14 15:11:58 -0800516 if (!mgr)
517 goto ap_finished;
518 BT_DBG("mgr %p", mgr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700519 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
520 &mgr->l2cap_conn->hcon->dst);
521 if (conn) {
522 BT_DBG("conn %p", hdev);
523 result = 0;
524 remote_id = conn->dst_id;
525 goto ap_finished;
526 }
527 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
528 if (!aplctx)
529 goto ap_finished;
530 aplctx->sk = sk;
531 sock_hold(sk);
532 return;
533
534ap_finished:
Peter Krystadf7dcc792011-11-14 15:11:58 -0800535 if (hdev)
536 hci_dev_put(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700537 l2cap_amp_physical_complete(result, id, remote_id, sk);
538}
539
540static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
541{
542 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
543 struct amp_ctx *ctx;
544 struct a2mp_getampassoc_req *req;
545
546 if (hdr->len < sizeof(*req))
547 return -EINVAL;
548 req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
549 skb_pull(skb, sizeof(*req));
550
551 ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
552 if (!ctx)
553 return -ENOMEM;
554 ctx->id = req->id;
555 ctx->d.gaa.req_ident = hdr->ident;
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800556 ctx->hdev = hci_dev_get(ctx->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700557 if (ctx->hdev)
558 ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
559 GFP_ATOMIC);
560 start_ctx(mgr, ctx);
561 return 0;
562}
563
/* State machine answering a peer's Get AMP Assoc request: reads the
 * local AMP assoc from the controller (possibly in several fragments)
 * and sends it back in the A2MP response.  Returns 1 when the context
 * is finished (destroy it), 0 while more events are expected.
 */
static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct sk_buff *skb = (struct sk_buff *) data;
	struct hci_cp_read_local_amp_assoc cp;
	struct hci_rp_read_local_amp_assoc *rp;
	struct a2mp_getampassoc_rsp rsp;
	u16 rem_len;
	u16 frag_len;

	rsp.status = 1;	/* failure unless the full assoc is read */
	if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
		goto gaa_finished;

	switch (ctx->state) {
	case AMP_GAA_INIT:
		/* Issue the first Read Local AMP Assoc fragment and wait
		 * for its command-complete. */
		ctx->state = AMP_GAA_RLAA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
		ctx->d.gaa.len_so_far = 0;
		cp.phy_handle = 0;
		cp.len_so_far = 0;
		cp.max_len = ctx->hdev->amp_assoc_size;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		break;

	case AMP_GAA_RLAA_COMPLETE:
		/* Command-complete arrived: append this fragment. */
		if (skb->len < 4)
			goto gaa_finished;
		rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
		if (rp->status)
			goto gaa_finished;
		rem_len = le16_to_cpu(rp->rem_len);
		skb_pull(skb, 4);	/* strip status/handle/rem_len */
		frag_len = skb->len;

		if (ctx->d.gaa.len_so_far + rem_len <=
				ctx->hdev->amp_assoc_size) {
			struct hci_cp_read_local_amp_assoc cp;
			u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
			memcpy(assoc, rp->frag, frag_len);
			/* NOTE(review): len_so_far advances by rem_len
			 * (the total remaining) rather than frag_len —
			 * correct only when the assoc fits in a single
			 * fragment; confirm multi-fragment behaviour. */
			ctx->d.gaa.len_so_far += rem_len;
			rem_len -= frag_len;
			if (rem_len == 0) {
				rsp.status = 0;	/* complete */
				goto gaa_finished;
			}
			/* more assoc data to read */
			cp.phy_handle = 0;
			cp.len_so_far = ctx->d.gaa.len_so_far;
			cp.max_len = ctx->hdev->amp_assoc_size;
			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		}
		/* NOTE(review): if the assoc would overflow the buffer
		 * the context just stalls here until its timer kills it
		 * — confirm this is the intended failure mode. */
		break;

	default:
		goto gaa_finished;
		break;
	}
	return 0;

gaa_finished:
	rsp.id = ctx->id;
	send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
			sizeof(rsp), &rsp,
			ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
	kfree(ctx->d.gaa.assoc);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	return 1;
}
634
/* Carries the outcome of an asynchronous hash request back to the
 * submitting thread.
 */
struct hmac_sha256_result {
	struct completion completion;
	int err;
};

/* crypto_async_request callback: ignore the interim -EINPROGRESS
 * notification, otherwise record the final status and wake the waiter.
 */
static void hmac_sha256_final(struct crypto_async_request *req, int err)
{
	struct hmac_sha256_result *r = req->data;
	if (err == -EINPROGRESS)
		return;
	r->err = err;
	complete(&r->completion);
}
648
649int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
650 u8 *output, u8 outlen)
651{
652 int ret = 0;
653 struct crypto_ahash *tfm;
654 struct scatterlist sg;
655 struct ahash_request *req;
656 struct hmac_sha256_result tresult;
657 void *hash_buff = NULL;
658
659 unsigned char hash_result[64];
660 int i;
661
662 memset(output, 0, outlen);
663
664 init_completion(&tresult.completion);
665
666 tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
667 CRYPTO_ALG_TYPE_AHASH_MASK);
668 if (IS_ERR(tfm)) {
669 BT_DBG("crypto_alloc_ahash failed");
670 ret = PTR_ERR(tfm);
671 goto err_tfm;
672 }
673
674 req = ahash_request_alloc(tfm, GFP_KERNEL);
675 if (!req) {
676 BT_DBG("failed to allocate request for hmac(sha256)");
677 ret = -ENOMEM;
678 goto err_req;
679 }
680
681 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
682 hmac_sha256_final, &tresult);
683
684 hash_buff = kzalloc(psize, GFP_KERNEL);
685 if (!hash_buff) {
686 BT_DBG("failed to kzalloc hash_buff");
687 ret = -ENOMEM;
688 goto err_hash_buf;
689 }
690
691 memset(hash_result, 0, 64);
692 memcpy(hash_buff, plaintext, psize);
693 sg_init_one(&sg, hash_buff, psize);
694
695 if (ksize) {
696 crypto_ahash_clear_flags(tfm, ~0);
697 ret = crypto_ahash_setkey(tfm, key, ksize);
698
699 if (ret) {
700 BT_DBG("crypto_ahash_setkey failed");
701 goto err_setkey;
702 }
703 }
704
705 ahash_request_set_crypt(req, &sg, hash_result, psize);
706 ret = crypto_ahash_digest(req);
707
708 BT_DBG("ret 0x%x", ret);
709
710 switch (ret) {
711 case 0:
712 for (i = 0; i < outlen; i++)
713 output[i] = hash_result[i];
714 break;
715 case -EINPROGRESS:
716 case -EBUSY:
717 ret = wait_for_completion_interruptible(&tresult.completion);
718 if (!ret && !tresult.err) {
719 INIT_COMPLETION(tresult.completion);
720 break;
721 } else {
722 BT_DBG("wait_for_completion_interruptible failed");
723 if (!ret)
724 ret = tresult.err;
725 goto out;
726 }
727 default:
728 goto out;
729 }
730
731out:
732err_setkey:
733 kfree(hash_buff);
734err_hash_buf:
735 ahash_request_free(req);
736err_req:
737 crypto_free_ahash(tfm);
738err_tfm:
739 return ret;
740}
741
742static void show_key(u8 *k)
743{
744 int i = 0;
745 for (i = 0; i < 32; i += 8)
746 BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
747 *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
748 *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
749}
750
/* Derive 32 bytes of AMP link key material for @conn into @data and set
 * @len/@type accordingly.  Requires a link that passes
 * hci_conn_check_link_mode() and a link key of type >= 3.  The BR/EDR
 * link key is widened by repetition, then chained through HMAC-SHA-256
 * with the "gamp" label (used directly for key_type 3) and the "802b"
 * label otherwise.  Returns 0 on success, -EACCES on insufficient
 * security, or an hmac_sha256() error.
 * NOTE(review): presumably this follows the Bluetooth AMP key
 * derivation hierarchy — confirm against the core specification.
 */
static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
	u8 bt2_key[32];
	u8 gamp_key[32];
	u8 b802_key[32];
	int result;

	if (!hci_conn_check_link_mode(conn))
		return -EACCES;

	BT_DBG("key_type %d", conn->key_type);
	if (conn->key_type < 3)
		return -EACCES;

	*type = conn->key_type;
	*len = 32;
	/* widen the 16-byte link key to 32 bytes by repetition */
	memcpy(&bt2_key[0], conn->link_key, 16);
	memcpy(&bt2_key[16], conn->link_key, 16);
	result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
	if (result)
		goto ps_finished;

	if (conn->key_type == 3) {
		/* key_type 3 uses the "gamp" derivation directly */
		BT_DBG("gamp_key");
		show_key(gamp_key);
		memcpy(data, gamp_key, 32);
		goto ps_finished;
	}

	result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
	if (result)
		goto ps_finished;

	BT_DBG("802b_key");
	show_key(b802_key);
	memcpy(data, b802_key, 32);

ps_finished:
	return result;
}
791
/* Monotonic physical-link handle source, shared by all controllers. */
static u8 amp_next_handle;

/* Allocate the next non-zero physical link handle.
 * NOTE(review): the counter is global and unsynchronized — concurrent
 * callers or multiple AMP controllers could receive duplicate handles;
 * confirm callers serialize, or move this into hci_dev as the TODO
 * suggests.
 */
static inline u8 physlink_handle(struct hci_dev *hdev)
{
	/* TODO amp_next_handle should be part of hci_dev */
	if (amp_next_handle == 0)
		amp_next_handle = 1;
	return amp_next_handle++;
}
800
801/* Start an Accept Physical Link sequence */
802static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
803{
804 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
805 struct amp_ctx *ctx = NULL;
806 struct a2mp_createphyslink_req *req;
807
808 if (hdr->len < sizeof(*req))
809 return -EINVAL;
810 req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
811 skb_pull(skb, sizeof(*req));
812 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
813
814 /* initialize the context */
815 ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
816 if (!ctx)
817 return -ENOMEM;
818 ctx->d.apl.req_ident = hdr->ident;
819 ctx->d.apl.remote_id = req->local_id;
820 ctx->id = req->remote_id;
821
822 /* add the supplied remote assoc to the context */
823 ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
824 if (ctx->d.apl.remote_assoc)
825 memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
826 ctx->d.apl.len_so_far = 0;
827 ctx->d.apl.rem_len = skb->len;
828 skb_pull(skb, skb->len);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800829 ctx->hdev = hci_dev_get(ctx->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700830 start_ctx(mgr, ctx);
831 return 0;
832}
833
/* State machine for accepting a peer-initiated physical link:
 * INIT (collision/security checks, HCI Accept Physical Link) ->
 * APL_STATUS (send A2MP response, first assoc fragment) ->
 * WRA_COMPLETE (remaining assoc fragments) ->
 * PL_COMPLETE (physical link established).
 * Returns 1 when the context is finished, 0 while more events are
 * expected.
 */
static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct sk_buff *skb = data;
	struct hci_cp_accept_phys_link acp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_cmd_status *cs = data;
	struct hci_ev_phys_link_complete *ev;
	struct a2mp_createphyslink_rsp rsp;
	struct amp_ctx *cplctx;
	struct amp_ctx *aplctx;
	u16 frag_len;
	struct hci_conn *conn;
	int result;

	BT_DBG("state %d", ctx->state);
	result = -EINVAL;
	rsp.status = 1;	/* Invalid Controller ID */
	if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
		goto apl_finished;
	if (evt_type == AMP_KILLED) {
		result = -EAGAIN;
		rsp.status = 4;	/* Disconnect request received */
		goto apl_finished;
	}
	if (!ctx->d.apl.remote_assoc) {
		result = -ENOMEM;
		rsp.status = 2;	/* Unable to Start */
		goto apl_finished;
	}

	switch (ctx->state) {
	case AMP_APL_INIT:
		BT_DBG("local_id %d, remote_id %d",
			ctx->id, ctx->d.apl.remote_id);
		/* Already have a link to this remote controller? */
		conn = hci_conn_hash_lookup_id(ctx->hdev,
			&ctx->mgr->l2cap_conn->hcon->dst,
			ctx->d.apl.remote_id);
		if (conn) {
			result = -EEXIST;
			rsp.status = 5;	/* Already Exists */
			goto apl_finished;
		}

		/* Another accept for the same remote is in flight:
		 * queue behind it. */
		aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
		if ((aplctx) &&
			(aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			BT_DBG("deferred to %p", aplctx);
			aplctx->deferred = ctx;
			break;
		}

		/* Create/accept collision: the side with the lower
		 * address (or a not-yet-complete create) yields. */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if ((cplctx) &&
			(cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
			BT_DBG("local %s remote %s",
				batostr(&bcon->hdev->bdaddr),
				batostr(&bcon->dst));
			if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
				(bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
				BT_DBG("COLLISION LOSER");
				cplctx->deferred = ctx;
				cancel_ctx(cplctx);
				break;
			} else {
				BT_DBG("COLLISION WINNER");
				result = -EISCONN;
				rsp.status = 3;	/* Collision */
				goto apl_finished;
			}
		}

		/* Derive link key material for the accept command. */
		result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
					&acp.key_len, &acp.type);
		if (result) {
			BT_DBG("SECURITY");
			rsp.status = 6;	/* Security Violation */
			goto apl_finished;
		}

		ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
		ctx->state = AMP_APL_APL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
		acp.phy_handle = ctx->d.apl.phy_handle;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
		break;

	case AMP_APL_APL_STATUS:
		if (cs->status != 0)
			goto apl_finished;
		/* PAL will accept link, send a2mp response */
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		rsp.status = 0;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);

		/* send the first assoc fragment (max 248 bytes each) */
		wcp.phy_handle = ctx->d.apl.phy_handle;
		wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
		wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
		ctx->state = AMP_APL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = size of the fixed wcp header before the fragment */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_APL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto apl_finished;
		if (wrp->phy_handle != ctx->d.apl.phy_handle)
			goto apl_finished;
		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		ctx->d.apl.len_so_far += frag_len;
		ctx->d.apl.rem_len -= frag_len;
		if (ctx->d.apl.rem_len > 0) {
			u8 *assoc;
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.apl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
			assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
			memcpy(wcp.frag, assoc, frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* wait for physical link complete event */
		ctx->state = AMP_APL_PL_COMPLETE;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
		break;

	case AMP_APL_PL_COMPLETE:
		/* physical link complete event received */
		if (skb->len < sizeof(*ev))
			goto apl_finished;
		ev = (struct hci_ev_phys_link_complete *) skb->data;
		if (ev->phy_handle != ctx->d.apl.phy_handle)
			break;	/* not ours — keep waiting */
		if (ev->status != 0)
			goto apl_finished;
		conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
		if (!conn)
			goto apl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
		conn->dst_id = ctx->d.apl.remote_id;
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		goto apl_finished;
		break;

	default:
		goto apl_finished;
		break;
	}
	return 0;

apl_finished:
	/* Report completion to L2CAP, send a failure response if the
	 * peer was never answered, and release context resources. */
	if (ctx->sk)
		l2cap_amp_physical_complete(result, ctx->id,
					ctx->d.apl.remote_id, ctx->sk);
	if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
	}
	kfree(ctx->d.apl.remote_assoc);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	return 1;
}
1016
1017static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
1018{
1019 struct hci_cp_disconn_phys_link dcp;
1020
1021 ctx->state = AMP_CPL_PL_CANCEL;
1022 ctx->evt_type = AMP_HCI_EVENT;
1023 ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
1024 dcp.phy_handle = ctx->d.cpl.phy_handle;
1025 dcp.reason = reason;
1026 hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
1027}
1028
/*
 * State machine for an outgoing (initiator-side) Create Physical Link.
 *
 * Driven by execute_ctx() whenever an event arrives for this context.
 * @data is an sk_buff for A2MP responses, HCI events and HCI
 * command-complete events, or a struct hci_ev_cmd_status for HCI
 * command-status events; which one is valid follows from @evt_type
 * and ctx->state.
 *
 * Sequence: A2MP Discover -> Get Info -> Get AMP Assoc from the peer,
 * then HCI Create Physical Link plus fragmented Write Remote AMP Assoc
 * on the local controller, then (after Channel Selected) fragmented
 * Read Local AMP Assoc which is sent to the peer in an A2MP Create
 * Physical Link request, finally waiting for both the A2MP response
 * and the HCI Physical Link Complete event.
 *
 * Returns 0 while the context remains active, 1 when it is finished
 * (the caller then destroys the context).
 */
static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct amp_ctrl *ctrl;
	struct sk_buff *skb = data;
	struct a2mp_cmd_hdr *hdr;	/* only assigned in GAA_RSP; elsewhere just sizeof(*hdr) */
	struct hci_ev_cmd_status *cs = data;
	struct amp_ctx *cplctx;
	struct a2mp_discover_req dreq;
	struct a2mp_discover_rsp *drsp;
	u16 *efm;			/* extended feature mask words trailing the discover rsp */
	struct a2mp_getinfo_req greq;
	struct a2mp_getinfo_rsp *grsp;
	struct a2mp_cl *cl;		/* controller list entry */
	struct a2mp_getampassoc_req areq;
	struct a2mp_getampassoc_rsp *arsp;
	struct hci_cp_create_phys_link cp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_channel_selected *cev;
	struct hci_cp_read_local_amp_assoc rcp;
	struct hci_rp_read_local_amp_assoc *rrp;
	struct a2mp_createphyslink_req creq;
	struct a2mp_createphyslink_rsp *crsp;
	struct hci_ev_phys_link_complete *pev;
	struct hci_ev_disconn_phys_link_complete *dev;
	u8 *assoc, *rassoc, *lassoc;
	u16 frag_len;
	u16 rem_len;
	int result = -EAGAIN;	/* reported to L2CAP when the context ends in failure */
	struct hci_conn *conn;

	BT_DBG("state %d", ctx->state);
	if (evt_type == AMP_KILLED)
		goto cpl_finished;

	if (evt_type == AMP_CANCEL) {
		/* Nothing to tear down before the HCI link is started,
		 * or when only the A2MP response is still outstanding;
		 * otherwise disconnect the half-created physical link.
		 */
		if ((ctx->state < AMP_CPL_CPL_STATUS) ||
			((ctx->state == AMP_CPL_PL_COMPLETE) &&
			!(ctx->evt_type & AMP_HCI_EVENT)))
			goto cpl_finished;

		cancel_cpl_ctx(ctx, 0x16);	/* 0x16: terminated by local host */
		return 0;
	}

	switch (ctx->state) {
	case AMP_CPL_INIT:
		/* serialize: only one CPL context runs per manager */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if (cplctx) {
			BT_DBG("deferred to %p", cplctx);
			cplctx->deferred = ctx;
			break;
		}
		ctx->state = AMP_CPL_DISC_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
		dreq.ext_feat = 0;
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
							sizeof(dreq), &dreq);
		break;

	case AMP_CPL_DISC_RSP:
		/* discover response: fixed part, optional extra feature
		 * mask words, then the controller list
		 */
		drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
		if (skb->len < (sizeof(*drsp))) {
			result = -EINVAL;
			goto cpl_finished;
		}

		efm = (u16 *) skb_pull(skb, sizeof(*drsp));
		BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
					le16_to_cpu(drsp->ext_feat));

		/* bit 15 set means another feature-mask word follows */
		while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
			if (skb->len < sizeof(*efm)) {
				result = -EINVAL;
				goto cpl_finished;
			}
			drsp->ext_feat = *efm;
			BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
			efm = (u16 *) skb_pull(skb, sizeof(*efm));
		}
		cl = (struct a2mp_cl *) efm;

		/* find the first remote and local controller with the
		 * same type
		 */
		greq.id = 0;
		result = -ENODEV;
		while (skb->len >= sizeof(*cl)) {
			if ((cl->id != 0) && (greq.id == 0)) {
				struct hci_dev *hdev;
				hdev = hci_dev_get_type(cl->type);
				if (hdev) {
					struct hci_conn *conn;
					ctx->hdev = hdev;
					ctx->id = hdev->id;
					ctx->d.cpl.remote_id = cl->id;
					/* an existing AMP link to this peer
					 * means we must not create another
					 */
					conn = hci_conn_hash_lookup_ba(hdev,
						ACL_LINK,
						&ctx->mgr->l2cap_conn->hcon->dst);
					if (conn) {
						BT_DBG("PL_COMPLETE exists %x",
							(int) conn->handle);
						result = 0;
					}
					ctrl = get_create_ctrl(ctx->mgr,
								cl->id);
					if (ctrl) {
						ctrl->type = cl->type;
						ctrl->status = cl->status;
					}
					greq.id = cl->id;
				}
			}
			cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
		}
		/* bail if no usable controller, or a link already exists
		 * (result == 0 from the lookup above)
		 */
		if ((!greq.id) || (!result))
			goto cpl_finished;
		ctx->state = AMP_CPL_GETINFO_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
							sizeof(greq), &greq);
		break;

	case AMP_CPL_GETINFO_RSP:
		if (skb->len < sizeof(*grsp))
			goto cpl_finished;
		grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
		skb_pull(skb, sizeof(*grsp));
		if (grsp->status)
			goto cpl_finished;
		if (grsp->id != ctx->d.cpl.remote_id)
			goto cpl_finished;
		/* cache the peer controller's capabilities */
		ctrl = get_ctrl(ctx->mgr, grsp->id);
		if (!ctrl)
			goto cpl_finished;
		ctrl->status = grsp->status;
		ctrl->total_bw = le32_to_cpu(grsp->total_bw);
		ctrl->max_bw = le32_to_cpu(grsp->max_bw);
		ctrl->min_latency = le32_to_cpu(grsp->min_latency);
		ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
		ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);

		ctx->d.cpl.max_len = ctrl->max_assoc_size;

		/* setup up GAA request */
		areq.id = ctx->d.cpl.remote_id;

		/* advance context state */
		ctx->state = AMP_CPL_GAA_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
							sizeof(areq), &areq);
		break;

	case AMP_CPL_GAA_RSP:
		if (skb->len < sizeof(*arsp))
			goto cpl_finished;
		hdr = (void *) skb->data;
		arsp = (void *) skb_pull(skb, sizeof(*hdr));
		if (arsp->status != 0)
			goto cpl_finished;

		/* store away remote assoc */
		assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
		ctx->d.cpl.len_so_far = 0;
		/* NOTE(review): rem_len comes from the peer's A2MP header
		 * and is not checked against skb->len before the memcpy
		 * below — a short frame could be over-read; verify against
		 * the A2MP framing guarantees of the caller.
		 */
		ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
		skb_pull(skb, ctx->d.cpl.rem_len);
		rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
		if (!rassoc)
			goto cpl_finished;
		memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
		ctx->d.cpl.remote_assoc = rassoc;

		/* set up CPL command */
		ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
		cp.phy_handle = ctx->d.cpl.phy_handle;
		if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
					&cp.key_len, &cp.type)) {
			result = -EPERM;
			goto cpl_finished;
		}

		/* advance context state */
		ctx->state = AMP_CPL_CPL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		break;

	case AMP_CPL_CPL_STATUS:
		/* received create physical link command status */
		if (cs->status != 0)
			goto cpl_finished;
		/* send the first assoc fragment */
		wcp.phy_handle = ctx->d.cpl.phy_handle;
		/* NOTE(review): unlike the later fragments this len_so_far
		 * is not cpu_to_le16()-converted — harmless only because
		 * it is always 0 here.
		 */
		wcp.len_so_far = ctx->d.cpl.len_so_far;
		wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
		ctx->state = AMP_CPL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = fixed header (handle + len_so_far + rem_len) */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_CPL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		if (skb->len < sizeof(*wrp))
			goto cpl_finished;
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto cpl_finished;
		if (wrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;

		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		ctx->d.cpl.len_so_far += frag_len;
		ctx->d.cpl.rem_len -= frag_len;
		if (ctx->d.cpl.rem_len > 0) {
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.cpl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
			memcpy(wcp.frag,
				ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
				frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* now wait for channel selected event */
		ctx->state = AMP_CPL_CHANNEL_SELECT;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
		break;

	case AMP_CPL_CHANNEL_SELECT:
		/* received channel selection event */
		if (skb->len < sizeof(*cev))
			goto cpl_finished;
		cev = (void *) skb->data;
/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
	Create Physical Link collision scenario
		if (cev->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
*/

		/* request the first local assoc fragment */
		rcp.phy_handle = ctx->d.cpl.phy_handle;
		rcp.len_so_far = 0;
		rcp.max_len = ctx->d.cpl.max_len;
		lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
		if (!lassoc)
			goto cpl_finished;
		ctx->d.cpl.local_assoc = lassoc;
		ctx->d.cpl.len_so_far = 0;
		ctx->state = AMP_CPL_RLA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		break;

	case AMP_CPL_RLA_COMPLETE:
		/* received read local amp assoc command complete event;
		 * 4 = status + phy_handle + rem_len header of the reply
		 */
		if (skb->len < 4)
			goto cpl_finished;
		rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
		if (rrp->status)
			goto cpl_finished;
		if (rrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
		rem_len = le16_to_cpu(rrp->rem_len);
		skb_pull(skb, 4);
		frag_len = skb->len;

		if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
			goto cpl_finished;

		/* save this fragment in context */
		lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
		memcpy(lassoc, rrp->frag, frag_len);
		ctx->d.cpl.len_so_far += frag_len;
		rem_len -= frag_len;
		if (rem_len > 0) {
			/* request another local assoc fragment */
			rcp.phy_handle = ctx->d.cpl.phy_handle;
			rcp.len_so_far = ctx->d.cpl.len_so_far;
			rcp.max_len = ctx->d.cpl.max_len;
			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		} else {
			creq.local_id = ctx->id;
			creq.remote_id = ctx->d.cpl.remote_id;
			/* wait for A2MP rsp AND phys link complete event */
			ctx->state = AMP_CPL_PL_COMPLETE;
			ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
			ctx->rsp_ident = next_ident(ctx->mgr);
			ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
			send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
				A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
				ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
		}
		break;

	case AMP_CPL_PL_COMPLETE:
		/* two independent completions are awaited; each one clears
		 * its bit in ctx->evt_type, and the link is up only when
		 * both have arrived
		 */
		if (evt_type == AMP_A2MP_RSP) {
			/* create physical link response received */
			ctx->evt_type &= ~AMP_A2MP_RSP;
			if (skb->len < sizeof(*crsp))
				goto cpl_finished;
			crsp = (void *) skb_pull(skb, sizeof(*hdr));
			if ((crsp->local_id != ctx->d.cpl.remote_id) ||
				(crsp->remote_id != ctx->id) ||
				(crsp->status != 0)) {
				cancel_cpl_ctx(ctx, 0x13);	/* remote user terminated */
				break;
			}

			/* notify Qualcomm PAL */
			if (ctx->hdev->manufacturer == 0x001d)
				hci_send_cmd(ctx->hdev,
					hci_opcode_pack(0x3f, 0x00), 0, NULL);
		}
		if (evt_type == AMP_HCI_EVENT) {
			ctx->evt_type &= ~AMP_HCI_EVENT;
			/* physical link complete event received */
			if (skb->len < sizeof(*pev))
				goto cpl_finished;
			pev = (void *) skb->data;
			if (pev->phy_handle != ctx->d.cpl.phy_handle)
				break;
			if (pev->status != 0)
				goto cpl_finished;
		}
		if (ctx->evt_type)
			break;	/* still waiting for the other completion */
		conn = hci_conn_hash_lookup_handle(ctx->hdev,
							ctx->d.cpl.phy_handle);
		if (!conn)
			goto cpl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		conn->dst_id = ctx->d.cpl.remote_id;
		conn->out = 1;
		goto cpl_finished;
		break;

	case AMP_CPL_PL_CANCEL:
		/* disconnect requested by cancel_cpl_ctx() has completed */
		dev = (void *) skb->data;
		BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
		result = -EISCONN;
		goto cpl_finished;
		break;

	default:
		goto cpl_finished;
		break;
	}
	return 0;

cpl_finished:
	/* tell L2CAP how the attempt ended, then release every resource
	 * the context accumulated along the way
	 */
	l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
					ctx->sk);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	kfree(ctx->d.cpl.remote_assoc);
	kfree(ctx->d.cpl.local_assoc);
	return 1;
}
1405
1406static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
1407{
1408 struct a2mp_cmd_hdr *hdr = (void *) skb->data;
1409 struct a2mp_disconnphyslink_req *req;
1410 struct a2mp_disconnphyslink_rsp rsp;
1411 struct hci_dev *hdev;
1412 struct hci_conn *conn;
1413 struct amp_ctx *aplctx;
1414
1415 BT_DBG("mgr %p skb %p", mgr, skb);
1416 if (hdr->len < sizeof(*req))
1417 return -EINVAL;
1418 req = (void *) skb_pull(skb, sizeof(*hdr));
1419 skb_pull(skb, sizeof(*req));
1420
1421 rsp.local_id = req->remote_id;
1422 rsp.remote_id = req->local_id;
1423 rsp.status = 0;
1424 BT_DBG("local_id %d remote_id %d",
1425 (int) rsp.local_id, (int) rsp.remote_id);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08001426 hdev = hci_dev_get(rsp.local_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001427 if (!hdev) {
1428 rsp.status = 1; /* Invalid Controller ID */
1429 goto dpl_finished;
1430 }
1431 BT_DBG("hdev %p", hdev);
1432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1433 &mgr->l2cap_conn->hcon->dst);
1434 if (!conn) {
1435 aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
1436 if (aplctx) {
1437 kill_ctx(aplctx);
1438 rsp.status = 0;
1439 goto dpl_finished;
1440 }
1441 rsp.status = 2; /* No Physical Link exists */
1442 goto dpl_finished;
1443 }
1444 BT_DBG("conn %p", conn);
1445 hci_disconnect(conn, 0x13);
1446
1447dpl_finished:
1448 send_a2mp_cmd(mgr, hdr->ident,
1449 A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
1450 if (hdev)
1451 hci_dev_put(hdev);
1452 return 0;
1453}
1454
1455static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
1456{
1457 struct amp_mgr *mgr = ctx->mgr;
1458 u8 finished = 0;
1459
1460 if (!mgr->connected)
1461 return 0;
1462
1463 switch (ctx->type) {
1464 case AMP_GETAMPASSOC:
1465 finished = getampassoc_handler(ctx, evt_type, data);
1466 break;
1467 case AMP_CREATEPHYSLINK:
1468 finished = createphyslink_handler(ctx, evt_type, data);
1469 break;
1470 case AMP_ACCEPTPHYSLINK:
1471 finished = acceptphyslink_handler(ctx, evt_type, data);
1472 break;
1473 }
1474
1475 if (!finished)
1476 mod_timer(&(ctx->timer), jiffies +
1477 msecs_to_jiffies(A2MP_RSP_TIMEOUT));
1478 else
1479 destroy_ctx(ctx);
1480 return finished;
1481}
1482
1483static int cancel_ctx(struct amp_ctx *ctx)
1484{
1485 return execute_ctx(ctx, AMP_CANCEL, 0);
1486}
1487
1488static int kill_ctx(struct amp_ctx *ctx)
1489{
1490 return execute_ctx(ctx, AMP_KILLED, 0);
1491}
1492
1493static void ctx_timeout_worker(struct work_struct *w)
1494{
1495 struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
1496 struct amp_ctx *ctx = work->ctx;
1497 kill_ctx(ctx);
1498 kfree(work);
1499}
1500
1501static void ctx_timeout(unsigned long data)
1502{
1503 struct amp_ctx *ctx = (struct amp_ctx *) data;
1504 struct amp_work_ctx_timeout *work;
1505
1506 BT_DBG("ctx %p", ctx);
1507 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1508 if (work) {
1509 INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
1510 work->ctx = ctx;
1511 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1512 kfree(work);
1513 }
1514}
1515
1516static void launch_ctx(struct amp_mgr *mgr)
1517{
1518 struct amp_ctx *ctx = NULL;
1519
1520 BT_DBG("mgr %p", mgr);
Peter Krystadf5289202011-11-14 15:11:22 -08001521 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001522 if (!list_empty(&mgr->ctx_list))
1523 ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
Peter Krystadf5289202011-11-14 15:11:22 -08001524 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001525 BT_DBG("ctx %p", ctx);
1526 if (ctx)
1527 execute_ctx(ctx, AMP_INIT, NULL);
1528}
1529
1530static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
1531{
1532 struct amp_ctx *ctx;
1533 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1534 u16 hdr_len = le16_to_cpu(hdr->len);
1535
1536 /* find context waiting for A2MP rsp with this rsp's identifier */
1537 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
1538 ctx = get_ctx_a2mp(mgr, hdr->ident);
1539 if (ctx) {
1540 execute_ctx(ctx, AMP_A2MP_RSP, skb);
1541 } else {
1542 BT_DBG("context not found");
1543 skb_pull(skb, sizeof(*hdr));
1544 if (hdr_len > skb->len)
1545 hdr_len = skb->len;
1546 skb_pull(skb, hdr_len);
1547 }
1548 return 0;
1549}
1550
1551/* L2CAP-A2MP interface */
1552
/*
 * Parse and dispatch all A2MP commands contained in one received frame.
 *
 * Each per-command handler is responsible for pulling its own bytes
 * off the skb; the loop re-reads skb->len after every dispatch and
 * keeps going while a full command header remains.  On any parse or
 * handler error a Command Reject is returned to the peer.
 */
static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
{
	/* NOTE(review): this outer hdr is captured before any skb_pull,
	 * so after processing it still points at the FIRST command's
	 * header — the reject below is sent with that first ident.
	 * Confirm this is the intended ident for multi-command frames.
	 */
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	int len;
	int err = 0;
	struct amp_mgr *mgr;

	mgr = get_amp_mgr_sk(sk);
	if (!mgr)
		goto a2mp_finished;

	len = skb->len;
	while (len >= sizeof(*hdr)) {
		/* shadows the outer hdr: current command's header */
		struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
		u16 clen = le16_to_cpu(hdr->len);

		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
		/* reject truncated commands and the reserved ident 0 */
		if (clen > len || !hdr->ident) {
			err = -EINVAL;
			break;
		}
		switch (hdr->code) {
		case A2MP_COMMAND_REJ:
			command_rej(mgr, skb);
			break;
		case A2MP_DISCOVER_REQ:
			err = discover_req(mgr, skb);
			break;
		case A2MP_CHANGE_NOTIFY:
			err = change_notify(mgr, skb);
			break;
		case A2MP_GETINFO_REQ:
			err = getinfo_req(mgr, skb);
			break;
		case A2MP_GETAMPASSOC_REQ:
			err = getampassoc_req(mgr, skb);
			break;
		case A2MP_CREATEPHYSLINK_REQ:
			err = createphyslink_req(mgr, skb);
			break;
		case A2MP_DISCONNPHYSLINK_REQ:
			err = disconnphyslink_req(mgr, skb);
			break;
		case A2MP_CHANGE_RSP:
		case A2MP_DISCOVER_RSP:
		case A2MP_GETINFO_RSP:
		case A2MP_GETAMPASSOC_RSP:
		case A2MP_CREATEPHYSLINK_RSP:
		case A2MP_DISCONNPHYSLINK_RSP:
			/* all responses funnel through the ident matcher */
			err = a2mp_rsp(mgr, skb);
			break;
		default:
			BT_ERR("Unknown A2MP signaling command 0x%2.2x",
				hdr->code);
			/* skip only the header; payload length unknown */
			skb_pull(skb, sizeof(*hdr));
			err = -EINVAL;
			break;
		}
		/* handlers consumed their bytes; recompute what is left */
		len = skb->len;
	}

a2mp_finished:
	if (err && mgr) {
		struct a2mp_cmd_rej rej;
		rej.reason = cpu_to_le16(0);
		send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
							sizeof(rej), &rej);
	}
}
1622
/* A2MP socket plumbing: send path and socket callbacks */
1624
1625static int send_a2mp(struct socket *sock, u8 *data, int len)
1626{
1627 struct kvec iv = { data, len };
1628 struct msghdr msg;
1629
1630 memset(&msg, 0, sizeof(msg));
1631
1632 return kernel_sendmsg(sock, &msg, &iv, 1, len);
1633}
1634
1635static void data_ready_worker(struct work_struct *w)
1636{
1637 struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
1638 struct sock *sk = work->sk;
1639 struct sk_buff *skb;
1640
1641 /* skb_dequeue() is thread-safe */
1642 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1643 a2mp_receive(sk, skb);
1644 kfree_skb(skb);
1645 }
1646 sock_put(work->sk);
1647 kfree(work);
1648}
1649
1650static void data_ready(struct sock *sk, int bytes)
1651{
1652 struct amp_work_data_ready *work;
1653 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1654 if (work) {
1655 INIT_WORK((struct work_struct *) work, data_ready_worker);
1656 sock_hold(sk);
1657 work->sk = sk;
1658 work->bytes = bytes;
1659 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1660 kfree(work);
1661 sock_put(sk);
1662 }
1663 }
1664}
1665
1666static void state_change_worker(struct work_struct *w)
1667{
1668 struct amp_work_state_change *work = (struct amp_work_state_change *) w;
1669 struct amp_mgr *mgr;
1670 switch (work->sk->sk_state) {
1671 case BT_CONNECTED:
1672 /* socket is up */
1673 BT_DBG("CONNECTED");
1674 mgr = get_amp_mgr_sk(work->sk);
1675 if (mgr) {
1676 mgr->connected = 1;
1677 if (mgr->skb) {
1678 l2cap_recv_deferred_frame(work->sk, mgr->skb);
1679 mgr->skb = NULL;
1680 }
1681 launch_ctx(mgr);
1682 }
1683 break;
1684
1685 case BT_CLOSED:
1686 /* connection is gone */
1687 BT_DBG("CLOSED");
1688 mgr = get_amp_mgr_sk(work->sk);
1689 if (mgr) {
1690 if (!sock_flag(work->sk, SOCK_DEAD))
1691 sock_release(mgr->a2mp_sock);
1692 mgr->a2mp_sock = NULL;
1693 remove_amp_mgr(mgr);
1694 }
1695 break;
1696
1697 default:
1698 /* something else happened */
1699 break;
1700 }
1701 sock_put(work->sk);
1702 kfree(work);
1703}
1704
1705static void state_change(struct sock *sk)
1706{
1707 struct amp_work_state_change *work;
1708 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1709 if (work) {
1710 INIT_WORK((struct work_struct *) work, state_change_worker);
1711 sock_hold(sk);
1712 work->sk = sk;
1713 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1714 kfree(work);
1715 sock_put(sk);
1716 }
1717 }
1718}
1719
1720static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
1721{
1722 int err;
1723 struct socket *sock;
1724 struct sockaddr_l2 addr;
1725 struct sock *sk;
1726 struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
1727 L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
1728 L2CAP_MODE_ERTM, 1, 0xFF, 1};
1729
1730
1731 err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
1732 BTPROTO_L2CAP, &sock);
1733
1734 if (err) {
1735 BT_ERR("sock_create_kern failed %d", err);
1736 return NULL;
1737 }
1738
1739 sk = sock->sk;
1740 sk->sk_data_ready = data_ready;
1741 sk->sk_state_change = state_change;
1742
1743 memset(&addr, 0, sizeof(addr));
1744 bacpy(&addr.l2_bdaddr, src);
1745 addr.l2_family = AF_BLUETOOTH;
1746 addr.l2_cid = L2CAP_CID_A2MP;
1747 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
1748 if (err) {
1749 BT_ERR("kernel_bind failed %d", err);
1750 sock_release(sock);
1751 return NULL;
1752 }
1753
1754 l2cap_fixed_channel_config(sk, &opts);
1755
1756 memset(&addr, 0, sizeof(addr));
1757 bacpy(&addr.l2_bdaddr, dst);
1758 addr.l2_family = AF_BLUETOOTH;
1759 addr.l2_cid = L2CAP_CID_A2MP;
1760 err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
1761 O_NONBLOCK);
1762 if ((err == 0) || (err == -EINPROGRESS))
1763 return sock;
1764 else {
1765 BT_ERR("kernel_connect failed %d", err);
1766 sock_release(sock);
1767 return NULL;
1768 }
1769}
1770
1771static void conn_ind_worker(struct work_struct *w)
1772{
1773 struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
Peter Krystad072a51f2012-03-30 12:59:33 -07001774 struct hci_conn *hcon = work->hcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775 struct sk_buff *skb = work->skb;
1776 struct amp_mgr *mgr;
1777
Peter Krystad072a51f2012-03-30 12:59:33 -07001778 mgr = get_create_amp_mgr(hcon, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001779 BT_DBG("mgr %p", mgr);
Peter Krystad072a51f2012-03-30 12:59:33 -07001780 hci_conn_put(hcon);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001781 kfree(work);
1782}
1783
1784static void create_physical_worker(struct work_struct *w)
1785{
1786 struct amp_work_create_physical *work =
1787 (struct amp_work_create_physical *) w;
1788
1789 create_physical(work->conn, work->sk);
1790 sock_put(work->sk);
1791 kfree(work);
1792}
1793
1794static void accept_physical_worker(struct work_struct *w)
1795{
1796 struct amp_work_accept_physical *work =
1797 (struct amp_work_accept_physical *) w;
1798
1799 accept_physical(work->conn, work->id, work->sk);
1800 sock_put(work->sk);
1801 kfree(work);
1802}
1803
1804/* L2CAP Fixed Channel interface */
1805
Peter Krystad072a51f2012-03-30 12:59:33 -07001806void amp_conn_ind(struct hci_conn *hcon, struct sk_buff *skb)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001807{
1808 struct amp_work_conn_ind *work;
Peter Krystad072a51f2012-03-30 12:59:33 -07001809 BT_DBG("hcon %p, skb %p", hcon, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001810 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1811 if (work) {
1812 INIT_WORK((struct work_struct *) work, conn_ind_worker);
Peter Krystad072a51f2012-03-30 12:59:33 -07001813 hci_conn_hold(hcon);
1814 work->hcon = hcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001815 work->skb = skb;
Peter Krystad072a51f2012-03-30 12:59:33 -07001816 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1817 hci_conn_put(hcon);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001818 kfree(work);
Peter Krystad072a51f2012-03-30 12:59:33 -07001819 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001820 }
1821}
1822
1823/* L2CAP Physical Link interface */
1824
1825void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
1826{
1827 struct amp_work_create_physical *work;
1828 BT_DBG("conn %p", conn);
1829 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1830 if (work) {
1831 INIT_WORK((struct work_struct *) work, create_physical_worker);
1832 work->conn = conn;
1833 work->sk = sk;
1834 sock_hold(sk);
1835 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1836 sock_put(sk);
1837 kfree(work);
1838 }
1839 }
1840}
1841
1842void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
1843{
1844 struct amp_work_accept_physical *work;
1845 BT_DBG("conn %p", conn);
1846
1847 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1848 if (work) {
1849 INIT_WORK((struct work_struct *) work, accept_physical_worker);
1850 work->conn = conn;
1851 work->sk = sk;
1852 work->id = id;
1853 sock_hold(sk);
1854 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1855 sock_put(sk);
1856 kfree(work);
1857 }
1858 }
1859}
1860
1861/* HCI interface */
1862
1863static void amp_cmd_cmplt_worker(struct work_struct *w)
1864{
1865 struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
1866 struct hci_dev *hdev = work->hdev;
1867 u16 opcode = work->opcode;
1868 struct sk_buff *skb = work->skb;
1869 struct amp_ctx *ctx;
1870
1871 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
1872 if (ctx)
1873 execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
1874 kfree_skb(skb);
1875 kfree(w);
1876}
1877
1878static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
1879 struct sk_buff *skb)
1880{
1881 struct amp_work_cmd_cmplt *work;
1882 struct sk_buff *skbc;
1883 BT_DBG("hdev %p opcode 0x%x skb %p len %d",
1884 hdev, opcode, skb, skb->len);
1885 skbc = skb_clone(skb, GFP_ATOMIC);
1886 if (!skbc)
1887 return;
1888 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1889 if (work) {
1890 INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
1891 work->hdev = hdev;
1892 work->opcode = opcode;
1893 work->skb = skbc;
1894 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1895 kfree(work);
1896 }
1897}
1898
1899static void amp_cmd_status_worker(struct work_struct *w)
1900{
1901 struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
1902 struct hci_dev *hdev = work->hdev;
1903 u16 opcode = work->opcode;
1904 u8 status = work->status;
1905 struct amp_ctx *ctx;
1906
1907 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
1908 if (ctx)
1909 execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
1910 kfree(w);
1911}
1912
1913static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
1914{
1915 struct amp_work_cmd_status *work;
1916 BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
1917 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1918 if (work) {
1919 INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
1920 work->hdev = hdev;
1921 work->opcode = opcode;
1922 work->status = status;
1923 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1924 kfree(work);
1925 }
1926}
1927
1928static void amp_event_worker(struct work_struct *w)
1929{
1930 struct amp_work_event *work = (struct amp_work_event *) w;
1931 struct hci_dev *hdev = work->hdev;
1932 u8 event = work->event;
1933 struct sk_buff *skb = work->skb;
1934 struct amp_ctx *ctx;
1935
1936 if (event == HCI_EV_AMP_STATUS_CHANGE) {
1937 struct hci_ev_amp_status_change *ev;
1938 if (skb->len < sizeof(*ev))
1939 goto amp_event_finished;
1940 ev = (void *) skb->data;
1941 if (ev->status != 0)
1942 goto amp_event_finished;
1943 if (ev->amp_status == hdev->amp_status)
1944 goto amp_event_finished;
1945 hdev->amp_status = ev->amp_status;
1946 send_a2mp_change_notify();
1947 goto amp_event_finished;
1948 }
1949 ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
1950 if (ctx)
1951 execute_ctx(ctx, AMP_HCI_EVENT, skb);
1952
1953amp_event_finished:
1954 kfree_skb(skb);
1955 kfree(w);
1956}
1957
1958static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
1959{
1960 struct amp_work_event *work;
1961 struct sk_buff *skbc;
1962 BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
1963 skbc = skb_clone(skb, GFP_ATOMIC);
1964 if (!skbc)
1965 return;
1966 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1967 if (work) {
1968 INIT_WORK((struct work_struct *) work, amp_event_worker);
1969 work->hdev = hdev;
1970 work->event = event;
1971 work->skb = skbc;
1972 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1973 kfree(work);
1974 }
1975}
1976
/* A controller appeared or disappeared; advertise the change to all
 * connected peers, then free the one-shot work item.
 */
static void amp_dev_event_worker(struct work_struct *w)
{
	send_a2mp_change_notify();
	kfree(w);
}
1982
1983static int amp_dev_event(struct notifier_block *this, unsigned long event,
1984 void *ptr)
1985{
1986 struct hci_dev *hdev = (struct hci_dev *) ptr;
1987 struct amp_work_event *work;
1988
1989 if (hdev->amp_type == HCI_BREDR)
1990 return NOTIFY_DONE;
1991
1992 switch (event) {
1993 case HCI_DEV_UNREG:
1994 case HCI_DEV_REG:
1995 case HCI_DEV_UP:
1996 case HCI_DEV_DOWN:
1997 BT_DBG("hdev %p event %ld", hdev, event);
1998 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1999 if (work) {
2000 INIT_WORK((struct work_struct *) work,
2001 amp_dev_event_worker);
2002 if (queue_work(amp_workqueue,
2003 (struct work_struct *) work) == 0)
2004 kfree(work);
2005 }
2006 }
2007 return NOTIFY_DONE;
2008}
2009
2010
2011/* L2CAP module init continued */
2012
/* Notifier for HCI device add/remove/up/down events. */
static struct notifier_block amp_notifier = {
	.notifier_call = amp_dev_event
};
2016
/* Callbacks registered with the HCI core so AMP-related command
 * completions, command statuses and events reach this module.
 */
static struct amp_mgr_cb hci_amp = {
	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
	.amp_cmd_status_event = amp_cmd_status_evt,
	.amp_event = amp_evt
};
2022
2023int amp_init(void)
2024{
2025 hci_register_amp(&hci_amp);
2026 hci_register_notifier(&amp_notifier);
2027 amp_next_handle = 1;
2028 amp_workqueue = create_singlethread_workqueue("a2mp");
2029 if (!amp_workqueue)
2030 return -EPERM;
2031 return 0;
2032}
2033
/* Module teardown.  Detach from the HCI core first so no new work can
 * be queued, then drain and destroy the A2MP workqueue.
 */
void amp_exit(void)
{
	hci_unregister_amp(&hci_amp);
	hci_unregister_notifier(&amp_notifier);
	flush_workqueue(amp_workqueue);
	destroy_workqueue(amp_workqueue);
}