Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4
AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.
* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
PRNG: Device tree entry for qrng device.
vidc:1080p: Set video core timeout value for Thumbnail mode
msm: sps: improve the debugging support in SPS driver
board-8064 msm: Overlap secure and non secure video firmware heaps.
msm: clock: Add handoff ops for 7x30 and copper XO clocks
msm_fb: display: Wait for external vsync before DTV IOMMU unmap
      msm: Fix circular dependency in debug UART settings
msm: gdsc: Add GDSC regulator driver for msm-copper
defconfig: Enable Mobicore Driver.
mobicore: Add mobicore driver.
mobicore: rename variable to lower case.
mobicore: rename folder.
mobicore: add makefiles
mobicore: initial import of kernel driver
ASoC: msm: Add SLIMBUS_2_RX CPU DAI
board-8064-gpio: Update FUNC for EPM SPI CS
msm_fb: display: Remove chicken bit config during video playback
mmc: msm_sdcc: enable the sanitize capability
      msm-fb: display: lm2 writeback support on mpq platforms
msm_fb: display: Disable LVDS phy & pll during panel off
...
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 3537d38..fc4543a 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -11,6 +11,8 @@
select CRYPTO_BLKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
help
Bluetooth is low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a57..828be1d 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
- hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
+ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+ amp.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index b9af14e..219df5c 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -80,16 +80,19 @@
"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
-void bt_sock_reclassify_lock(struct sock *sk, int proto)
+static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
{
- BUG_ON(!sk);
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return;
+
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk,
bt_slock_key_strings[proto], &bt_slock_key[proto],
bt_key_strings[proto], &bt_lock_key[proto]);
}
-EXPORT_SYMBOL(bt_sock_reclassify_lock);
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
@@ -180,8 +183,7 @@
if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
err = bt_proto[proto]->create(net, sock, proto, kern);
- if (!err)
- bt_sock_reclassify_lock(sock->sk, proto);
+ bt_sock_reclassify_lock(sock, proto);
module_put(bt_proto[proto]->owner);
}
@@ -192,17 +194,17 @@
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
- write_lock(&l->lock);
+ write_lock_bh(&l->lock);
sk_add_node(sk, &l->head);
- write_unlock(&l->lock);
+ write_unlock_bh(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
- write_lock(&l->lock);
+ write_lock_bh(&l->lock);
sk_del_node_init(sk);
- write_unlock(&l->lock);
+ write_unlock_bh(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);
@@ -235,14 +237,15 @@
BT_DBG("parent %p", parent);
+ local_bh_disable();
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
- lock_sock(sk);
+ bh_lock_sock(sk);
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
- release_sock(sk);
+ bh_unlock_sock(sk);
bt_accept_unlink(sk);
continue;
}
@@ -253,12 +256,14 @@
if (newsock)
sock_graft(sk, newsock);
- release_sock(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
return sk;
}
- release_sock(sk);
+ bh_unlock_sock(sk);
}
+ local_bh_enable();
return NULL;
}
@@ -488,7 +493,7 @@
sk->sk_state == BT_CONFIG)
return mask;
- if (!bt_sk(sk)->suspended && sock_writeable(sk))
+ if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -553,8 +558,9 @@
BT_DBG("sk %p", sk);
add_wait_queue(sk_sleep(sk), &wait);
- set_current_state(TASK_INTERRUPTIBLE);
while (sk->sk_state != state) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (!timeo) {
err = -EINPROGRESS;
break;
@@ -568,13 +574,12 @@
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
break;
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
new file mode 100644
index 0000000..ba638d1
--- /dev/null
+++ b/net/bluetooth/amp.c
@@ -0,0 +1,2041 @@
+/*
+ Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/amp.h>
+
+static struct workqueue_struct *amp_workqueue;
+
+LIST_HEAD(amp_mgr_list);
+DEFINE_RWLOCK(amp_mgr_list_lock);
+
+static int send_a2mp(struct socket *sock, u8 *data, int len);
+
+static void ctx_timeout(unsigned long data);
+
+static void launch_ctx(struct amp_mgr *mgr);
+static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
+static int kill_ctx(struct amp_ctx *ctx);
+static int cancel_ctx(struct amp_ctx *ctx);
+
+static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
+
+/* Tear down an AMP manager: unlink it from the global manager list, kill all
+ * of its outstanding contexts, then free the cached controller and the
+ * manager itself.
+ * Fix: the list/lock identifiers were mangled by an HTML-unescape pass
+ * ("&_mgr_list_lock"); the globals declared above are amp_mgr_list and
+ * amp_mgr_list_lock. */
+static void remove_amp_mgr(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p", mgr);
+
+ write_lock(&amp_mgr_list_lock);
+ list_del(&mgr->list);
+ write_unlock(&amp_mgr_list_lock);
+
+ /* kill_ctx() removes the ctx from ctx_list, so drop the read lock
+  * around each call and restart from the list head. */
+ read_lock(&mgr->ctx_list_lock);
+ while (!list_empty(&mgr->ctx_list)) {
+ struct amp_ctx *ctx;
+ ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
+ read_unlock(&mgr->ctx_list_lock);
+ BT_DBG("kill ctx %p", ctx);
+ kill_ctx(ctx);
+ read_lock(&mgr->ctx_list_lock);
+ }
+ read_unlock(&mgr->ctx_list_lock);
+
+ kfree(mgr->ctrls);
+
+ kfree(mgr);
+}
+
+/* Look up the AMP manager whose A2MP fixed-channel socket wraps @sk.
+ * Returns NULL if no manager owns that sock.
+ * Fix: restore the mangled amp_mgr_list / amp_mgr_list_lock identifiers. */
+static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
+{
+ struct amp_mgr *mgr;
+ struct amp_mgr *found = NULL;
+
+ read_lock(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
+ found = mgr;
+ break;
+ }
+ }
+ read_unlock(&amp_mgr_list_lock);
+ return found;
+}
+
+/* Return the AMP manager bound to @hcon's L2CAP connection, creating one
+ * (including its A2MP fixed-channel socket) if none exists yet.
+ * @skb is stashed on a newly created manager for later processing.
+ * Returns NULL on allocation or socket-open failure.
+ * Fix: restore the mangled amp_mgr_list / amp_mgr_list_lock identifiers. */
+static struct amp_mgr *get_create_amp_mgr(struct hci_conn *hcon,
+ struct sk_buff *skb)
+{
+ struct amp_mgr *mgr;
+
+ write_lock(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if (mgr->l2cap_conn == hcon->l2cap_data) {
+ BT_DBG("found %p", mgr);
+ write_unlock(&amp_mgr_list_lock);
+ goto gc_finished;
+ }
+ }
+ write_unlock(&amp_mgr_list_lock);
+
+ mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
+ if (!mgr)
+ return NULL;
+
+ mgr->l2cap_conn = hcon->l2cap_data;
+ mgr->next_ident = 1;
+ INIT_LIST_HEAD(&mgr->ctx_list);
+ rwlock_init(&mgr->ctx_list_lock);
+ mgr->skb = skb;
+ BT_DBG("hcon %p mgr %p", hcon, mgr);
+ mgr->a2mp_sock = open_fixed_channel(&hcon->hdev->bdaddr, &hcon->dst);
+ if (!mgr->a2mp_sock) {
+ kfree(mgr);
+ return NULL;
+ }
+ write_lock(&amp_mgr_list_lock);
+ list_add(&(mgr->list), &amp_mgr_list);
+ write_unlock(&amp_mgr_list_lock);
+
+gc_finished:
+ return mgr;
+}
+
+/* Return the manager's cached remote controller if its id matches
+ * @remote_id, otherwise NULL (only one controller is cached per mgr). */
+static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
+{
+ struct amp_ctrl *ctrl = mgr->ctrls;
+
+ if (!ctrl || ctrl->id != remote_id)
+ return NULL;
+ return ctrl;
+}
+
+/* Return the cached controller record for @id, replacing any stale cached
+ * record with a fresh one if the id does not match. Only a single remote
+ * controller is cached per manager. Returns NULL on allocation failure
+ * (the stale record has already been freed in that case). */
+static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
+{
+ struct amp_ctrl *ctrl;
+
+ BT_DBG("mgr %p, id %d", mgr, id);
+ if ((mgr->ctrls) && (mgr->ctrls->id == id))
+ ctrl = mgr->ctrls;
+ else {
+ /* drop the previously cached controller, if any */
+ kfree(mgr->ctrls);
+ ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
+ if (ctrl) {
+ ctrl->mgr = mgr;
+ ctrl->id = id;
+ }
+ mgr->ctrls = ctrl;
+ }
+
+ return ctrl;
+}
+
+/* Allocate and initialise a state-machine context of the given type and
+ * initial state; its timeout timer is set up but not yet started.
+ * Returns NULL on allocation failure. */
+static struct amp_ctx *create_ctx(u8 type, u8 state)
+{
+ struct amp_ctx *new_ctx;
+
+ new_ctx = kzalloc(sizeof(*new_ctx), GFP_ATOMIC);
+ if (new_ctx) {
+ new_ctx->type = type;
+ new_ctx->state = state;
+ init_timer(&new_ctx->timer);
+ new_ctx->timer.function = ctx_timeout;
+ new_ctx->timer.data = (unsigned long) new_ctx;
+ }
+ BT_DBG("ctx %p, type %d", new_ctx, type);
+ return new_ctx;
+}
+
+/* Attach @ctx to @mgr's context list and kick its state machine with the
+ * AMP_INIT event. */
+static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
+{
+ BT_DBG("ctx %p", ctx);
+ write_lock(&mgr->ctx_list_lock);
+ list_add(&ctx->list, &mgr->ctx_list);
+ write_unlock(&mgr->ctx_list_lock);
+ ctx->mgr = mgr;
+ execute_ctx(ctx, AMP_INIT, 0);
+}
+
+/* Detach @ctx from its manager, stop its timer and free it. If another
+ * context was deferred behind this one, start that context now by feeding
+ * it AMP_INIT. */
+static void destroy_ctx(struct amp_ctx *ctx)
+{
+ struct amp_mgr *mgr = ctx->mgr;
+
+ BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
+ del_timer(&ctx->timer);
+ write_lock(&mgr->ctx_list_lock);
+ list_del(&ctx->list);
+ write_unlock(&mgr->ctx_list_lock);
+ if (ctx->deferred)
+ execute_ctx(ctx->deferred, AMP_INIT, 0);
+ kfree(ctx);
+}
+
+/* Find the first context of @type on @mgr's context list, or NULL. */
+static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
+{
+ struct amp_ctx *iter;
+ struct amp_ctx *match = NULL;
+
+ read_lock(&mgr->ctx_list_lock);
+ list_for_each_entry(iter, &mgr->ctx_list, list) {
+ if (iter->type != type)
+ continue;
+ match = iter;
+ break;
+ }
+ read_unlock(&mgr->ctx_list_lock);
+ return match;
+}
+
+/* Find a context of @type on the same manager as @cur, excluding @cur
+ * itself. Returns NULL if no other such context exists. */
+static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
+{
+ struct amp_mgr *owner = cur->mgr;
+ struct amp_ctx *iter;
+ struct amp_ctx *match = NULL;
+
+ read_lock(&owner->ctx_list_lock);
+ list_for_each_entry(iter, &owner->ctx_list, list) {
+ if (iter == cur || iter->type != type)
+ continue;
+ match = iter;
+ break;
+ }
+ read_unlock(&owner->ctx_list_lock);
+ return match;
+}
+
+/* Find the context on @mgr that is waiting for an A2MP response carrying
+ * ident @ident, or NULL if none is waiting. */
+static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
+{
+ struct amp_ctx *fnd = NULL;
+ struct amp_ctx *ctx;
+
+ read_lock(&mgr->ctx_list_lock);
+ list_for_each_entry(ctx, &mgr->ctx_list, list) {
+ if ((ctx->evt_type & AMP_A2MP_RSP) &&
+ (ctx->rsp_ident == ident)) {
+ fnd = ctx;
+ break;
+ }
+ }
+ read_unlock(&mgr->ctx_list_lock);
+ return fnd;
+}
+
+/* Scan every manager's context list for the context that is waiting on an
+ * HCI event of @evt_type from @hdev. For command status/complete events
+ * @evt_value is the opcode; for plain HCI events it is the event code.
+ * Returns the matching context or NULL.
+ * Fix: restore the mangled amp_mgr_list / amp_mgr_list_lock identifiers. */
+static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
+ u16 evt_value)
+{
+ struct amp_mgr *mgr;
+ struct amp_ctx *fnd = NULL;
+
+ read_lock(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ struct amp_ctx *ctx;
+ read_lock(&mgr->ctx_list_lock);
+ list_for_each_entry(ctx, &mgr->ctx_list, list) {
+ struct hci_dev *ctx_hdev;
+ /* resolve ctx->id to a device for comparison;
+  * balanced by hci_dev_put() below */
+ ctx_hdev = hci_dev_get(ctx->id);
+ if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
+ switch (evt_type) {
+ case AMP_HCI_CMD_STATUS:
+ case AMP_HCI_CMD_CMPLT:
+ if (ctx->opcode == evt_value)
+ fnd = ctx;
+ break;
+ case AMP_HCI_EVENT:
+ if (ctx->evt_code == (u8) evt_value)
+ fnd = ctx;
+ break;
+ }
+ }
+ if (ctx_hdev)
+ hci_dev_put(ctx_hdev);
+
+ if (fnd)
+ break;
+ }
+ read_unlock(&mgr->ctx_list_lock);
+ }
+ read_unlock(&amp_mgr_list_lock);
+ return fnd;
+}
+
+/* Return the next A2MP ident for @mgr, wrapping and skipping 0 (reserved). */
+static inline u8 next_ident(struct amp_mgr *mgr)
+{
+ if (++mgr->next_ident == 0)
+ mgr->next_ident = 1;
+ return mgr->next_ident;
+}
+
+/* Build and send one A2MP PDU on @mgr's fixed channel: a2mp_cmd_hdr
+ * {code, ident, len} followed by (data,len) and then (data2,len2)
+ * concatenated. Silently drops the PDU if the manager has no socket or the
+ * temporary buffer cannot be allocated. */
+static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
+ u16 len, void *data, u16 len2, void *data2)
+{
+ struct a2mp_cmd_hdr *hdr;
+ int plen;
+ u8 *p, *cmd;
+
+ BT_DBG("ident %d code 0x%02x", ident, code);
+ if (!mgr->a2mp_sock)
+ return;
+ plen = sizeof(*hdr) + len + len2;
+ cmd = kzalloc(plen, GFP_ATOMIC);
+ if (!cmd)
+ return;
+ hdr = (struct a2mp_cmd_hdr *) cmd;
+ hdr->code = code;
+ hdr->ident = ident;
+ hdr->len = cpu_to_le16(len+len2);
+ p = cmd + sizeof(*hdr);
+ memcpy(p, data, len);
+ p += len;
+ memcpy(p, data2, len2);
+ send_a2mp(mgr->a2mp_sock, cmd, plen);
+ kfree(cmd);
+}
+
+/* Convenience wrapper: send an A2MP PDU with a single payload part. */
+static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
+ u8 code, u16 len, void *data)
+{
+ send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
+}
+
+/* Handle an incoming A2MP Command Reject: kill any context that was waiting
+ * for a response with the rejected ident. Returns 0, or -EINVAL if the PDU
+ * is too short to carry a reject body. */
+static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct a2mp_cmd_rej *rej;
+ struct amp_ctx *ctx;
+
+ BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+ rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
+ if (skb->len < sizeof(*rej))
+ return -EINVAL;
+ BT_DBG("reason %d", le16_to_cpu(rej->reason));
+ ctx = get_ctx_a2mp(mgr, hdr->ident);
+ if (ctx)
+ kill_ctx(ctx);
+ skb_pull(skb, sizeof(*rej));
+ return 0;
+}
+
+/* Send an A2MP PDU whose payload is @msg followed by a controller list built
+ * from every powered-up local non-BR/EDR (AMP) hci device. Entry 0 is the
+ * mandatory BR/EDR record (id 0, status 1). Always returns 0.
+ * Fix: bound num_ctrls so that a fully populated hci device table cannot
+ * write past clist[15] (the original could produce a 17th entry). */
+static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
+ void *msg)
+{
+ struct a2mp_cl clist[16];
+ struct a2mp_cl *cl;
+ struct hci_dev *hdev;
+ int num_ctrls = 1, id;
+
+ cl = clist;
+ cl->id = 0;
+ cl->type = 0;
+ cl->status = 1;
+
+ for (id = 0; id < 16 && num_ctrls < 16; ++id) {
+ hdev = hci_dev_get(id);
+ if (hdev) {
+ if ((hdev->amp_type != HCI_BREDR) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ (cl + num_ctrls)->id = hdev->id;
+ (cl + num_ctrls)->type = hdev->amp_type;
+ (cl + num_ctrls)->status = hdev->amp_status;
+ ++num_ctrls;
+ }
+ hci_dev_put(hdev);
+ }
+ }
+ send_a2mp_cmd2(mgr, ident, code, len, msg,
+ num_ctrls*sizeof(*cl), clist);
+
+ return 0;
+}
+
+/* Broadcast an A2MP Change Notify (empty payload plus controller list) to
+ * every manager that has completed discovery.
+ * Fix: restore the mangled amp_mgr_list identifier.
+ * NOTE(review): this traversal takes no lock on amp_mgr_list — confirm all
+ * callers run in a context where the list cannot change concurrently. */
+static void send_a2mp_change_notify(void)
+{
+ struct amp_mgr *mgr;
+
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if (mgr->discovered)
+ send_a2mp_cl(mgr, next_ident(mgr),
+ A2MP_CHANGE_NOTIFY, 0, NULL);
+ }
+}
+
+/* Handle an A2MP Discover Request: consume the request (including any
+ * chained extended-feature words flagged by bit 15), mark the manager as
+ * discovered, and reply with a Discover Response plus controller list.
+ * Returns 0 or -EINVAL on a truncated PDU. */
+static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct a2mp_discover_req *req;
+ u16 *efm;
+ struct a2mp_discover_rsp rsp;
+
+ req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
+ if (skb->len < sizeof(*req))
+ return -EINVAL;
+ efm = (u16 *) skb_pull(skb, sizeof(*req));
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
+ le16_to_cpu(req->ext_feat));
+
+ /* bit 15 set means another extended-feature word follows */
+ while (le16_to_cpu(req->ext_feat) & 0x8000) {
+ if (skb->len < sizeof(*efm))
+ return -EINVAL;
+ req->ext_feat = *efm;
+ BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
+ efm = (u16 *) skb_pull(skb, sizeof(*efm));
+ }
+
+ rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ rsp.ext_feat = 0;
+
+ mgr->discovered = 1;
+
+ return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
+ sizeof(rsp), &rsp);
+}
+
+/* Handle an A2MP Change Notify: refresh the cached controller record for
+ * each non-zero entry of the received controller list, then acknowledge
+ * with a Change Response. Always returns 0. */
+static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct a2mp_cl *cl;
+
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
+ while (skb->len >= sizeof(*cl)) {
+ struct amp_ctrl *ctrl;
+ /* id 0 is the BR/EDR controller — nothing to cache */
+ if (cl->id != 0) {
+ ctrl = get_create_ctrl(mgr, cl->id);
+ if (ctrl != NULL) {
+ ctrl->type = cl->type;
+ ctrl->status = cl->status;
+ }
+ }
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* TODO find controllers in manager that were not on received */
+ /* controller list and destroy them */
+ send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);
+
+ return 0;
+}
+
+/* Handle an A2MP Get Info Request for controller @id: reply with the local
+ * hci device's AMP bandwidth/latency/capability figures, or status 1
+ * ("invalid controller") when the device is absent or BR/EDR.
+ * Returns 0 or -EINVAL on a truncated PDU. */
+static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ u8 *data;
+ int id;
+ struct hci_dev *hdev;
+ struct a2mp_getinfo_rsp rsp;
+
+ data = (u8 *) skb_pull(skb, sizeof(*hdr));
+ if (le16_to_cpu(hdr->len) < sizeof(*data))
+ return -EINVAL;
+ if (skb->len < sizeof(*data))
+ return -EINVAL;
+ id = *data;
+ skb_pull(skb, sizeof(*data));
+ rsp.id = id;
+ rsp.status = 1;
+
+ BT_DBG("id %d", id);
+ hdev = hci_dev_get(id);
+
+ if (hdev && hdev->amp_type != HCI_BREDR) {
+ rsp.status = 0;
+ rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+ rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+ rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+ rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+ rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+ }
+
+ send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+ return 0;
+}
+
+/* Entry point from L2CAP: start a Create Physical Link sequence for @sk on
+ * @conn's AMP manager. On any setup failure the completion callback is
+ * invoked immediately with -ENOMEM. Holds a reference on @sk for the
+ * lifetime of the context (released by the handler). */
+static void create_physical(struct l2cap_conn *conn, struct sock *sk)
+{
+ struct amp_mgr *mgr;
+ struct amp_ctx *ctx = NULL;
+
+ BT_DBG("conn %p", conn);
+ mgr = get_create_amp_mgr(conn->hcon, NULL);
+ if (!mgr)
+ goto cp_finished;
+ BT_DBG("mgr %p", mgr);
+ ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
+ if (!ctx)
+ goto cp_finished;
+ ctx->sk = sk;
+ sock_hold(sk);
+ start_ctx(mgr, ctx);
+ return;
+
+cp_finished:
+ l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
+}
+
+/* Entry point from L2CAP: accept a physical link on local controller @id.
+ * If a physical link to the peer already exists, complete immediately with
+ * its remote id; otherwise attach @sk to the pending ACCEPTPHYSLINK context
+ * (taking a sock reference) so its handler can complete later. Any other
+ * failure completes with -EINVAL. */
+static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
+{
+ struct amp_mgr *mgr;
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
+ struct amp_ctx *aplctx = NULL;
+ u8 remote_id = 0;
+ int result = -EINVAL;
+
+ BT_DBG("lcon %p", lcon);
+ hdev = hci_dev_get(id);
+ if (!hdev)
+ goto ap_finished;
+ BT_DBG("hdev %p", hdev);
+ mgr = get_create_amp_mgr(lcon->hcon, NULL);
+ if (!mgr)
+ goto ap_finished;
+ BT_DBG("mgr %p", mgr);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &mgr->l2cap_conn->hcon->dst);
+ if (conn) {
+ BT_DBG("conn %p", hdev);
+ result = 0;
+ remote_id = conn->dst_id;
+ goto ap_finished;
+ }
+ aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
+ if (!aplctx)
+ goto ap_finished;
+ aplctx->sk = sk;
+ sock_hold(sk);
+ return;
+
+ap_finished:
+ if (hdev)
+ hci_dev_put(hdev);
+ l2cap_amp_physical_complete(result, id, remote_id, sk);
+}
+
+/* Handle an A2MP Get AMP Assoc Request: spin up a GETAMPASSOC context that
+ * will read the local AMP assoc from the hci device and reply. The handler
+ * responds with status 1 if the device or its assoc buffer is unavailable.
+ * Returns 0, -EINVAL on a short PDU, or -ENOMEM.
+ * Fix: hdr->len is little-endian on the wire; convert with le16_to_cpu()
+ * before the length check (matches getinfo_req). */
+static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct amp_ctx *ctx;
+ struct a2mp_getampassoc_req *req;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+ req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
+ skb_pull(skb, sizeof(*req));
+
+ ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->id = req->id;
+ ctx->d.gaa.req_ident = hdr->ident;
+ ctx->hdev = hci_dev_get(ctx->id);
+ if (ctx->hdev)
+ ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
+ GFP_ATOMIC);
+ start_ctx(mgr, ctx);
+ return 0;
+}
+
+/* State machine for answering a Get AMP Assoc request. Reads the local AMP
+ * assoc from the controller in fragments via HCI Read Local AMP Assoc, then
+ * replies over A2MP. Returns 0 to stay alive, 1 when the context is done
+ * (a response — success or status 1 — has been sent and refs released). */
+static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+ struct sk_buff *skb = (struct sk_buff *) data;
+ struct hci_cp_read_local_amp_assoc cp;
+ struct hci_rp_read_local_amp_assoc *rp;
+ struct a2mp_getampassoc_rsp rsp;
+ u16 rem_len;
+ u16 frag_len;
+
+ rsp.status = 1;
+ if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
+ goto gaa_finished;
+
+ switch (ctx->state) {
+ case AMP_GAA_INIT:
+ /* issue the first Read Local AMP Assoc fragment request */
+ ctx->state = AMP_GAA_RLAA_COMPLETE;
+ ctx->evt_type = AMP_HCI_CMD_CMPLT;
+ ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
+ ctx->d.gaa.len_so_far = 0;
+ cp.phy_handle = 0;
+ cp.len_so_far = 0;
+ cp.max_len = ctx->hdev->amp_assoc_size;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+ break;
+
+ case AMP_GAA_RLAA_COMPLETE:
+ /* one fragment of the local assoc arrived */
+ if (skb->len < 4)
+ goto gaa_finished;
+ rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
+ if (rp->status)
+ goto gaa_finished;
+ rem_len = le16_to_cpu(rp->rem_len);
+ skb_pull(skb, 4);
+ frag_len = skb->len;
+
+ if (ctx->d.gaa.len_so_far + rem_len <=
+ ctx->hdev->amp_assoc_size) {
+ struct hci_cp_read_local_amp_assoc cp;
+ u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
+ memcpy(assoc, rp->frag, frag_len);
+ ctx->d.gaa.len_so_far += rem_len;
+ rem_len -= frag_len;
+ if (rem_len == 0) {
+ /* whole assoc read — reply with status 0 */
+ rsp.status = 0;
+ goto gaa_finished;
+ }
+ /* more assoc data to read */
+ cp.phy_handle = 0;
+ cp.len_so_far = ctx->d.gaa.len_so_far;
+ cp.max_len = ctx->hdev->amp_assoc_size;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+ }
+ break;
+
+ default:
+ goto gaa_finished;
+ break;
+ }
+ return 0;
+
+gaa_finished:
+ rsp.id = ctx->id;
+ send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
+ sizeof(rsp), &rsp,
+ ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
+ kfree(ctx->d.gaa.assoc);
+ if (ctx->hdev)
+ hci_dev_put(ctx->hdev);
+ return 1;
+}
+
+/* Completion carrier for the async HMAC computation below. */
+struct hmac_sha256_result {
+ struct completion completion;
+ int err;
+};
+
+/* Async-hash callback: record the final error and wake the waiter.
+ * -EINPROGRESS notifications are ignored (not the final call). */
+static void hmac_sha256_final(struct crypto_async_request *req, int err)
+{
+ struct hmac_sha256_result *r = req->data;
+ if (err == -EINPROGRESS)
+ return;
+ r->err = err;
+ complete(&r->completion);
+}
+
+/* Compute HMAC-SHA256 of @plaintext under @key using the kernel async-hash
+ * API and copy the first @outlen digest bytes into @output.
+ * Returns 0 on success, negative errno otherwise.
+ * NOTE(review): assumes outlen <= 32 (SHA-256 digest size) — confirm callers.
+ * Fix: when the digest completes asynchronously (-EINPROGRESS/-EBUSY) the
+ * result is now copied into @output; the original only copied it on the
+ * synchronous path, so async completions returned 0 with @output all-zero. */
+int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
+ u8 *output, u8 outlen)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm;
+ struct scatterlist sg;
+ struct ahash_request *req;
+ struct hmac_sha256_result tresult;
+ void *hash_buff = NULL;
+
+ unsigned char hash_result[64];
+ int i;
+
+ memset(output, 0, outlen);
+
+ init_completion(&tresult.completion);
+
+ tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(tfm)) {
+ BT_DBG("crypto_alloc_ahash failed");
+ ret = PTR_ERR(tfm);
+ goto err_tfm;
+ }
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ BT_DBG("failed to allocate request for hmac(sha256)");
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ hmac_sha256_final, &tresult);
+
+ hash_buff = kzalloc(psize, GFP_KERNEL);
+ if (!hash_buff) {
+ BT_DBG("failed to kzalloc hash_buff");
+ ret = -ENOMEM;
+ goto err_hash_buf;
+ }
+
+ memset(hash_result, 0, 64);
+ memcpy(hash_buff, plaintext, psize);
+ sg_init_one(&sg, hash_buff, psize);
+
+ if (ksize) {
+ crypto_ahash_clear_flags(tfm, ~0);
+ ret = crypto_ahash_setkey(tfm, key, ksize);
+
+ if (ret) {
+ BT_DBG("crypto_ahash_setkey failed");
+ goto err_setkey;
+ }
+ }
+
+ ahash_request_set_crypt(req, &sg, hash_result, psize);
+ ret = crypto_ahash_digest(req);
+
+ BT_DBG("ret 0x%x", ret);
+
+ switch (ret) {
+ case 0:
+ /* synchronous completion: digest already in hash_result */
+ for (i = 0; i < outlen; i++)
+ output[i] = hash_result[i];
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ /* asynchronous completion: wait for the callback */
+ ret = wait_for_completion_interruptible(&tresult.completion);
+ if (!ret && !tresult.err) {
+ INIT_COMPLETION(tresult.completion);
+ /* bug fix: copy the finished digest out */
+ for (i = 0; i < outlen; i++)
+ output[i] = hash_result[i];
+ break;
+ } else {
+ BT_DBG("wait_for_completion_interruptible failed");
+ if (!ret)
+ ret = tresult.err;
+ goto out;
+ }
+ default:
+ goto out;
+ }
+
+out:
+err_setkey:
+ kfree(hash_buff);
+err_hash_buf:
+ ahash_request_free(req);
+err_req:
+ crypto_free_ahash(tfm);
+err_tfm:
+ return ret;
+}
+
+/* Dump a 32-byte key to the debug log, eight bytes per line. */
+static void show_key(u8 *k)
+{
+ int row;
+ for (row = 0; row < 32; row += 8)
+ BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
+ k[row], k[row+1], k[row+2], k[row+3],
+ k[row+4], k[row+5], k[row+6], k[row+7]);
+}
+
+/* Derive the 32-byte physical-link security key for @conn. The 16-byte BT
+ * link key is doubled to 32 bytes, then HMAC-SHA256 with label "gamp"
+ * produces the generic AMP key; key_type 3 uses that directly, other
+ * accepted types derive a further "802b" key from it. Writes key material
+ * to @data and fills @len (always 32) and @type (the link key type).
+ * Returns 0 on success, -EACCES if the link mode or key type is
+ * unacceptable, or the HMAC error code. */
+static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
+{
+ u8 bt2_key[32];
+ u8 gamp_key[32];
+ u8 b802_key[32];
+ int result;
+
+ if (!hci_conn_check_link_mode(conn))
+ return -EACCES;
+
+ BT_DBG("key_type %d", conn->key_type);
+ /* key types below 3 are too weak for AMP use */
+ if (conn->key_type < 3)
+ return -EACCES;
+
+ *type = conn->key_type;
+ *len = 32;
+ /* expand the 16-byte link key to 32 bytes by repetition */
+ memcpy(&bt2_key[0], conn->link_key, 16);
+ memcpy(&bt2_key[16], conn->link_key, 16);
+ result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
+ if (result)
+ goto ps_finished;
+
+ if (conn->key_type == 3) {
+ BT_DBG("gamp_key");
+ show_key(gamp_key);
+ memcpy(data, gamp_key, 32);
+ goto ps_finished;
+ }
+
+ result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
+ if (result)
+ goto ps_finished;
+
+ BT_DBG("802b_key");
+ show_key(b802_key);
+ memcpy(data, b802_key, 32);
+
+ps_finished:
+ return result;
+}
+
+/* Allocate the next physical-link handle, wrapping and skipping 0.
+ * NOTE(review): this counter is a single global shared across all
+ * controllers (see TODO below) and is not protected against concurrent
+ * callers — confirm call sites are serialized. */
+static u8 amp_next_handle;
+static inline u8 physlink_handle(struct hci_dev *hdev)
+{
+ /* TODO amp_next_handle should be part of hci_dev */
+ if (amp_next_handle == 0)
+ amp_next_handle = 1;
+ return amp_next_handle++;
+}
+
+/* Start an Accept Physical Link sequence: handle a peer's A2MP Create
+ * Physical Link Request by creating an ACCEPTPHYSLINK context that carries
+ * the peer's AMP assoc and the requested controller ids.
+ * Returns 0, -EINVAL on a short PDU, or -ENOMEM.
+ * Fix: hdr->len is little-endian on the wire; convert with le16_to_cpu()
+ * before the length check (matches getinfo_req). */
+static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct amp_ctx *ctx = NULL;
+ struct a2mp_createphyslink_req *req;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+ req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
+ skb_pull(skb, sizeof(*req));
+ BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+ /* initialize the context */
+ ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->d.apl.req_ident = hdr->ident;
+ ctx->d.apl.remote_id = req->local_id;
+ ctx->id = req->remote_id;
+
+ /* add the supplied remote assoc to the context; allocation failure is
+  * tolerated here — the handler responds with "Unable to Start" */
+ ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
+ if (ctx->d.apl.remote_assoc)
+ memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
+ ctx->d.apl.len_so_far = 0;
+ ctx->d.apl.rem_len = skb->len;
+ skb_pull(skb, skb->len);
+ ctx->hdev = hci_dev_get(ctx->id);
+ start_ctx(mgr, ctx);
+ return 0;
+}
+
+/* State machine for accepting a peer-initiated physical link:
+ *   APL_INIT        -> resolve collisions/deferrals, derive the security
+ *                      key, issue HCI Accept Physical Link
+ *   APL_APL_STATUS  -> on command status, send the A2MP response and write
+ *                      the first remote-assoc fragment
+ *   APL_WRA_COMPLETE-> stream remaining assoc fragments (248 bytes each)
+ *   APL_PL_COMPLETE -> finish on the Physical Link Complete event
+ * Returns 0 to stay alive, 1 when finished (completion reported to L2CAP,
+ * failure response sent if appropriate, and refs released). */
+static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+ struct sk_buff *skb = data;
+ struct hci_cp_accept_phys_link acp;
+ struct hci_cp_write_remote_amp_assoc wcp;
+ struct hci_rp_write_remote_amp_assoc *wrp;
+ struct hci_ev_cmd_status *cs = data;
+ struct hci_ev_phys_link_complete *ev;
+ struct a2mp_createphyslink_rsp rsp;
+ struct amp_ctx *cplctx;
+ struct amp_ctx *aplctx;
+ u16 frag_len;
+ struct hci_conn *conn;
+ int result;
+
+ BT_DBG("state %d", ctx->state);
+ result = -EINVAL;
+ rsp.status = 1; /* Invalid Controller ID */
+ if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
+ goto apl_finished;
+ if (evt_type == AMP_KILLED) {
+ result = -EAGAIN;
+ rsp.status = 4; /* Disconnect request received */
+ goto apl_finished;
+ }
+ if (!ctx->d.apl.remote_assoc) {
+ result = -ENOMEM;
+ rsp.status = 2; /* Unable to Start */
+ goto apl_finished;
+ }
+
+ switch (ctx->state) {
+ case AMP_APL_INIT:
+ BT_DBG("local_id %d, remote_id %d",
+ ctx->id, ctx->d.apl.remote_id);
+ conn = hci_conn_hash_lookup_id(ctx->hdev,
+ &ctx->mgr->l2cap_conn->hcon->dst,
+ ctx->d.apl.remote_id);
+ if (conn) {
+ result = -EEXIST;
+ rsp.status = 5; /* Already Exists */
+ goto apl_finished;
+ }
+
+ /* another accept for the same remote id? queue behind it */
+ aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
+ if ((aplctx) &&
+ (aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
+ BT_DBG("deferred to %p", aplctx);
+ aplctx->deferred = ctx;
+ break;
+ }
+
+ /* simultaneous create from our side: resolve the collision
+  * by bdaddr comparison, lower address loses */
+ cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
+ if ((cplctx) &&
+ (cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
+ struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
+ BT_DBG("local %s remote %s",
+ batostr(&bcon->hdev->bdaddr),
+ batostr(&bcon->dst));
+ if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
+ (bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
+ BT_DBG("COLLISION LOSER");
+ cplctx->deferred = ctx;
+ cancel_ctx(cplctx);
+ break;
+ } else {
+ BT_DBG("COLLISION WINNER");
+ result = -EISCONN;
+ rsp.status = 3; /* Collision */
+ goto apl_finished;
+ }
+ }
+
+ result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
+ &acp.key_len, &acp.type);
+ if (result) {
+ BT_DBG("SECURITY");
+ rsp.status = 6; /* Security Violation */
+ goto apl_finished;
+ }
+
+ ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
+ ctx->state = AMP_APL_APL_STATUS;
+ ctx->evt_type = AMP_HCI_CMD_STATUS;
+ ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
+ acp.phy_handle = ctx->d.apl.phy_handle;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
+ break;
+
+ case AMP_APL_APL_STATUS:
+ if (cs->status != 0)
+ goto apl_finished;
+ /* PAL will accept link, send a2mp response */
+ rsp.local_id = ctx->id;
+ rsp.remote_id = ctx->d.apl.remote_id;
+ rsp.status = 0;
+ send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
+ A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
+
+ /* send the first assoc fragment */
+ wcp.phy_handle = ctx->d.apl.phy_handle;
+ wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
+ wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
+ frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
+ memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
+ ctx->state = AMP_APL_WRA_COMPLETE;
+ ctx->evt_type = AMP_HCI_CMD_CMPLT;
+ ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
+ hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+ break;
+
+ case AMP_APL_WRA_COMPLETE:
+ /* received write remote amp assoc command complete event */
+ wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
+ if (wrp->status != 0)
+ goto apl_finished;
+ if (wrp->phy_handle != ctx->d.apl.phy_handle)
+ goto apl_finished;
+ /* update progress */
+ frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
+ ctx->d.apl.len_so_far += frag_len;
+ ctx->d.apl.rem_len -= frag_len;
+ if (ctx->d.apl.rem_len > 0) {
+ u8 *assoc;
+ /* another assoc fragment to send */
+ wcp.phy_handle = ctx->d.apl.phy_handle;
+ wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
+ wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
+ frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
+ assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
+ memcpy(wcp.frag, assoc, frag_len);
+ hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+ break;
+ }
+ /* wait for physical link complete event */
+ ctx->state = AMP_APL_PL_COMPLETE;
+ ctx->evt_type = AMP_HCI_EVENT;
+ ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
+ break;
+
+ case AMP_APL_PL_COMPLETE:
+ /* physical link complete event received */
+ if (skb->len < sizeof(*ev))
+ goto apl_finished;
+ ev = (struct hci_ev_phys_link_complete *) skb->data;
+ if (ev->phy_handle != ctx->d.apl.phy_handle)
+ break;
+ if (ev->status != 0)
+ goto apl_finished;
+ conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
+ if (!conn)
+ goto apl_finished;
+ result = 0;
+ BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
+ conn->dst_id = ctx->d.apl.remote_id;
+ bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
+ goto apl_finished;
+ break;
+
+ default:
+ goto apl_finished;
+ break;
+ }
+ return 0;
+
+apl_finished:
+ if (ctx->sk)
+ l2cap_amp_physical_complete(result, ctx->id,
+ ctx->d.apl.remote_id, ctx->sk);
+ if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
+ rsp.local_id = ctx->id;
+ rsp.remote_id = ctx->d.apl.remote_id;
+ send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
+ A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
+ }
+ kfree(ctx->d.apl.remote_assoc);
+ if (ctx->sk)
+ sock_put(ctx->sk);
+ if (ctx->hdev)
+ hci_dev_put(ctx->hdev);
+ return 1;
+}
+
+/* Abort an in-progress create-physical-link: issue an HCI Disconnect
+ * Physical Link for the context's handle and move the context to the
+ * PL_CANCEL state to await the disconnect-complete event. */
+static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
+{
+ struct hci_cp_disconn_phys_link dcp;
+
+ ctx->state = AMP_CPL_PL_CANCEL;
+ ctx->evt_type = AMP_HCI_EVENT;
+ ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
+ dcp.phy_handle = ctx->d.cpl.phy_handle;
+ dcp.reason = reason;
+ hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
+}
+
+/* Initiator-side state machine for creating an AMP physical link.
+ *
+ * Driven by execute_ctx() with A2MP responses, HCI command status /
+ * complete events and HCI events.  The flow is:
+ *   A2MP discover -> getinfo -> get AMP assoc (remote) ->
+ *   HCI Create Physical Link -> Write Remote AMP ASSOC (fragmented) ->
+ *   Channel Selected -> Read Local AMP ASSOC (fragmented) ->
+ *   A2MP Create Physical Link request -> Physical Link Complete.
+ *
+ * Returns 0 while the context stays alive (the caller re-arms its
+ * timer) and 1 when the context is finished and must be destroyed.
+ */
+static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+	struct amp_ctrl *ctrl;
+	struct sk_buff *skb = data;
+	struct a2mp_cmd_hdr *hdr;
+	struct hci_ev_cmd_status *cs = data;
+	struct amp_ctx *cplctx;
+	struct a2mp_discover_req dreq;
+	struct a2mp_discover_rsp *drsp;
+	u16 *efm;
+	struct a2mp_getinfo_req greq;
+	struct a2mp_getinfo_rsp *grsp;
+	struct a2mp_cl *cl;
+	struct a2mp_getampassoc_req areq;
+	struct a2mp_getampassoc_rsp *arsp;
+	struct hci_cp_create_phys_link cp;
+	struct hci_cp_write_remote_amp_assoc wcp;
+	struct hci_rp_write_remote_amp_assoc *wrp;
+	struct hci_ev_channel_selected *cev;
+	struct hci_cp_read_local_amp_assoc rcp;
+	struct hci_rp_read_local_amp_assoc *rrp;
+	struct a2mp_createphyslink_req creq;
+	struct a2mp_createphyslink_rsp *crsp;
+	struct hci_ev_phys_link_complete *pev;
+	struct hci_ev_disconn_phys_link_complete *dev;
+	u8 *assoc, *rassoc, *lassoc;
+	u16 frag_len;
+	u16 rem_len;
+	int result = -EAGAIN;
+	struct hci_conn *conn;
+
+	BT_DBG("state %d", ctx->state);
+	if (evt_type == AMP_KILLED)
+		goto cpl_finished;
+
+	if (evt_type == AMP_CANCEL) {
+		/* Before CPL_STATUS nothing was sent to the controller, and
+		 * once only the A2MP response is outstanding it is too late
+		 * to cancel - just finish; otherwise tear the link down.
+		 */
+		if ((ctx->state < AMP_CPL_CPL_STATUS) ||
+			((ctx->state == AMP_CPL_PL_COMPLETE) &&
+			!(ctx->evt_type & AMP_HCI_EVENT)))
+			goto cpl_finished;
+
+		cancel_cpl_ctx(ctx, 0x16);
+		return 0;
+	}
+
+	switch (ctx->state) {
+	case AMP_CPL_INIT:
+		/* serialize: defer behind any create-phys-link already
+		 * running on this manager
+		 */
+		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
+		if (cplctx) {
+			BT_DBG("deferred to %p", cplctx);
+			cplctx->deferred = ctx;
+			break;
+		}
+		ctx->state = AMP_CPL_DISC_RSP;
+		ctx->evt_type = AMP_A2MP_RSP;
+		ctx->rsp_ident = next_ident(ctx->mgr);
+		dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+		dreq.ext_feat = 0;
+		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
+					sizeof(dreq), &dreq);
+		break;
+
+	case AMP_CPL_DISC_RSP:
+		drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
+		if (skb->len < (sizeof(*drsp))) {
+			result = -EINVAL;
+			goto cpl_finished;
+		}
+
+		efm = (u16 *) skb_pull(skb, sizeof(*drsp));
+		BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
+			le16_to_cpu(drsp->ext_feat));
+
+		/* bit 15 of the extended feature mask means another mask
+		 * word follows; walk the whole chain
+		 */
+		while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
+			if (skb->len < sizeof(*efm)) {
+				result = -EINVAL;
+				goto cpl_finished;
+			}
+			drsp->ext_feat = *efm;
+			BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
+			efm = (u16 *) skb_pull(skb, sizeof(*efm));
+		}
+		cl = (struct a2mp_cl *) efm;
+
+		/* find the first remote and local controller with the
+		 * same type
+		 */
+		greq.id = 0;
+		result = -ENODEV;
+		while (skb->len >= sizeof(*cl)) {
+			if ((cl->id != 0) && (greq.id == 0)) {
+				struct hci_dev *hdev;
+				hdev = hci_dev_get_type(cl->type);
+				if (hdev) {
+					struct hci_conn *conn;
+					ctx->hdev = hdev;
+					ctx->id = hdev->id;
+					ctx->d.cpl.remote_id = cl->id;
+					conn = hci_conn_hash_lookup_ba(hdev,
+						ACL_LINK,
+						&ctx->mgr->l2cap_conn->hcon->dst);
+					if (conn) {
+						/* a physical link already
+						 * exists; result 0 makes the
+						 * check below finish early
+						 */
+						BT_DBG("PL_COMPLETE exists %x",
+							(int) conn->handle);
+						result = 0;
+					}
+					ctrl = get_create_ctrl(ctx->mgr,
+								cl->id);
+					if (ctrl) {
+						ctrl->type = cl->type;
+						ctrl->status = cl->status;
+					}
+					greq.id = cl->id;
+				}
+			}
+			cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+		}
+		/* finish if no usable AMP was found, or a link exists */
+		if ((!greq.id) || (!result))
+			goto cpl_finished;
+		ctx->state = AMP_CPL_GETINFO_RSP;
+		ctx->evt_type = AMP_A2MP_RSP;
+		ctx->rsp_ident = next_ident(ctx->mgr);
+		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
+					sizeof(greq), &greq);
+		break;
+
+	case AMP_CPL_GETINFO_RSP:
+		/* NOTE(review): unlike the other states, this length check
+		 * runs before the A2MP header is pulled - confirm skb->len
+		 * here is meant to cover hdr + rsp together.
+		 */
+		if (skb->len < sizeof(*grsp))
+			goto cpl_finished;
+		grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
+		skb_pull(skb, sizeof(*grsp));
+		if (grsp->status)
+			goto cpl_finished;
+		if (grsp->id != ctx->d.cpl.remote_id)
+			goto cpl_finished;
+		ctrl = get_ctrl(ctx->mgr, grsp->id);
+		if (!ctrl)
+			goto cpl_finished;
+		/* cache the remote controller's advertised capabilities */
+		ctrl->status = grsp->status;
+		ctrl->total_bw = le32_to_cpu(grsp->total_bw);
+		ctrl->max_bw = le32_to_cpu(grsp->max_bw);
+		ctrl->min_latency = le32_to_cpu(grsp->min_latency);
+		ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
+		ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
+
+		ctx->d.cpl.max_len = ctrl->max_assoc_size;
+
+		/* setup up GAA request */
+		areq.id = ctx->d.cpl.remote_id;
+
+		/* advance context state */
+		ctx->state = AMP_CPL_GAA_RSP;
+		ctx->evt_type = AMP_A2MP_RSP;
+		ctx->rsp_ident = next_ident(ctx->mgr);
+		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
+					sizeof(areq), &areq);
+		break;
+
+	case AMP_CPL_GAA_RSP:
+		if (skb->len < sizeof(*arsp))
+			goto cpl_finished;
+		hdr = (void *) skb->data;
+		arsp = (void *) skb_pull(skb, sizeof(*hdr));
+		if (arsp->status != 0)
+			goto cpl_finished;
+
+		/* store away remote assoc */
+		/* NOTE(review): hdr->len is used without le16_to_cpu and the
+		 * derived rem_len is not validated against skb->len before
+		 * the memcpy below - confirm for big-endian targets and for
+		 * a peer that lies about its length field.
+		 */
+		assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
+		ctx->d.cpl.len_so_far = 0;
+		ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
+		skb_pull(skb, ctx->d.cpl.rem_len);
+		rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
+		if (!rassoc)
+			goto cpl_finished;
+		memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
+		ctx->d.cpl.remote_assoc = rassoc;
+
+		/* set up CPL command */
+		ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
+		cp.phy_handle = ctx->d.cpl.phy_handle;
+		if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
+					&cp.key_len, &cp.type)) {
+			result = -EPERM;
+			goto cpl_finished;
+		}
+
+		/* advance context state */
+		ctx->state = AMP_CPL_CPL_STATUS;
+		ctx->evt_type = AMP_HCI_CMD_STATUS;
+		ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
+		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+		break;
+
+	case AMP_CPL_CPL_STATUS:
+		/* received create physical link command status */
+		if (cs->status != 0)
+			goto cpl_finished;
+		/* send the first assoc fragment; 248 is the largest
+		 * fragment carried by one Write Remote AMP ASSOC command
+		 */
+		wcp.phy_handle = ctx->d.cpl.phy_handle;
+		wcp.len_so_far = ctx->d.cpl.len_so_far;
+		wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
+		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
+		memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
+		ctx->state = AMP_CPL_WRA_COMPLETE;
+		ctx->evt_type = AMP_HCI_CMD_CMPLT;
+		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
+		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+		break;
+
+	case AMP_CPL_WRA_COMPLETE:
+		/* received write remote amp assoc command complete event */
+		if (skb->len < sizeof(*wrp))
+			goto cpl_finished;
+		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
+		if (wrp->status != 0)
+			goto cpl_finished;
+		if (wrp->phy_handle != ctx->d.cpl.phy_handle)
+			goto cpl_finished;
+
+		/* update progress */
+		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
+		ctx->d.cpl.len_so_far += frag_len;
+		ctx->d.cpl.rem_len -= frag_len;
+		if (ctx->d.cpl.rem_len > 0) {
+			/* another assoc fragment to send */
+			wcp.phy_handle = ctx->d.cpl.phy_handle;
+			wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
+			wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
+			frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
+			memcpy(wcp.frag,
+				ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
+				frag_len);
+			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+			break;
+		}
+		/* now wait for channel selected event */
+		ctx->state = AMP_CPL_CHANNEL_SELECT;
+		ctx->evt_type = AMP_HCI_EVENT;
+		ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
+		break;
+
+	case AMP_CPL_CHANNEL_SELECT:
+		/* received channel selection event */
+		if (skb->len < sizeof(*cev))
+			goto cpl_finished;
+		cev = (void *) skb->data;
+/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
+	Create Physical Link collision scenario
+		if (cev->phy_handle != ctx->d.cpl.phy_handle)
+			goto cpl_finished;
+*/
+
+		/* request the first local assoc fragment */
+		rcp.phy_handle = ctx->d.cpl.phy_handle;
+		rcp.len_so_far = 0;
+		rcp.max_len = ctx->d.cpl.max_len;
+		lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
+		if (!lassoc)
+			goto cpl_finished;
+		ctx->d.cpl.local_assoc = lassoc;
+		ctx->d.cpl.len_so_far = 0;
+		ctx->state = AMP_CPL_RLA_COMPLETE;
+		ctx->evt_type = AMP_HCI_CMD_CMPLT;
+		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
+		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
+		break;
+
+	case AMP_CPL_RLA_COMPLETE:
+		/* received read local amp assoc command complete event;
+		 * 4 bytes = status + phy_handle + rem_len response header
+		 */
+		if (skb->len < 4)
+			goto cpl_finished;
+		rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
+		if (rrp->status)
+			goto cpl_finished;
+		if (rrp->phy_handle != ctx->d.cpl.phy_handle)
+			goto cpl_finished;
+		rem_len = le16_to_cpu(rrp->rem_len);
+		skb_pull(skb, 4);
+		frag_len = skb->len;
+
+		if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
+			goto cpl_finished;
+
+		/* save this fragment in context */
+		lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
+		memcpy(lassoc, rrp->frag, frag_len);
+		ctx->d.cpl.len_so_far += frag_len;
+		rem_len -= frag_len;
+		if (rem_len > 0) {
+			/* request another local assoc fragment */
+			rcp.phy_handle = ctx->d.cpl.phy_handle;
+			rcp.len_so_far = ctx->d.cpl.len_so_far;
+			rcp.max_len = ctx->d.cpl.max_len;
+			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
+		} else {
+			creq.local_id = ctx->id;
+			creq.remote_id = ctx->d.cpl.remote_id;
+			/* wait for A2MP rsp AND phys link complete event */
+			ctx->state = AMP_CPL_PL_COMPLETE;
+			ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
+			ctx->rsp_ident = next_ident(ctx->mgr);
+			ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
+			send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
+				A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
+				ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
+		}
+		break;
+
+	case AMP_CPL_PL_COMPLETE:
+		if (evt_type == AMP_A2MP_RSP) {
+			/* create physical link response received */
+			ctx->evt_type &= ~AMP_A2MP_RSP;
+			if (skb->len < sizeof(*crsp))
+				goto cpl_finished;
+			crsp = (void *) skb_pull(skb, sizeof(*hdr));
+			if ((crsp->local_id != ctx->d.cpl.remote_id) ||
+				(crsp->remote_id != ctx->id) ||
+				(crsp->status != 0)) {
+				cancel_cpl_ctx(ctx, 0x13);
+				break;
+			}
+
+			/* notify Qualcomm PAL (vendor-specific command) */
+			if (ctx->hdev->manufacturer == 0x001d)
+				hci_send_cmd(ctx->hdev,
+					hci_opcode_pack(0x3f, 0x00), 0, NULL);
+		}
+		if (evt_type == AMP_HCI_EVENT) {
+			ctx->evt_type &= ~AMP_HCI_EVENT;
+			/* physical link complete event received */
+			if (skb->len < sizeof(*pev))
+				goto cpl_finished;
+			pev = (void *) skb->data;
+			if (pev->phy_handle != ctx->d.cpl.phy_handle)
+				break;
+			if (pev->status != 0)
+				goto cpl_finished;
+		}
+		/* both the A2MP response and the HCI event must arrive
+		 * before the link counts as up
+		 */
+		if (ctx->evt_type)
+			break;
+		conn = hci_conn_hash_lookup_handle(ctx->hdev,
+							ctx->d.cpl.phy_handle);
+		if (!conn)
+			goto cpl_finished;
+		result = 0;
+		BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
+		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
+		conn->dst_id = ctx->d.cpl.remote_id;
+		conn->out = 1;
+		goto cpl_finished;
+		break;
+
+	case AMP_CPL_PL_CANCEL:
+		/* the disconnect issued by cancel_cpl_ctx() completed */
+		dev = (void *) skb->data;
+		BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
+		result = -EISCONN;
+		goto cpl_finished;
+		break;
+
+	default:
+		goto cpl_finished;
+		break;
+	}
+	return 0;
+
+cpl_finished:
+	/* report the outcome to L2CAP and drop every context reference */
+	l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
+					ctx->sk);
+	if (ctx->sk)
+		sock_put(ctx->sk);
+	if (ctx->hdev)
+		hci_dev_put(ctx->hdev);
+	kfree(ctx->d.cpl.remote_assoc);
+	kfree(ctx->d.cpl.local_assoc);
+	return 1;
+}
+
+/* Handle an incoming A2MP Disconnect Physical Link request.  Looks up
+ * the local controller and any matching physical link; an existing
+ * link (or a pending accept context) is torn down, and a response with
+ * the appropriate A2MP status is always sent back.
+ */
+static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+	struct a2mp_cmd_hdr *hdr = (void *) skb->data;
+	struct a2mp_disconnphyslink_req *req;
+	struct a2mp_disconnphyslink_rsp rsp;
+	struct hci_dev *hdev;
+	struct hci_conn *conn;
+	struct amp_ctx *aplctx;
+
+	BT_DBG("mgr %p skb %p", mgr, skb);
+	/* hdr->len is little-endian on the wire; convert before the
+	 * length check (a2mp_rsp() already does so for the same field)
+	 */
+	if (le16_to_cpu(hdr->len) < sizeof(*req))
+		return -EINVAL;
+	req = (void *) skb_pull(skb, sizeof(*hdr));
+	skb_pull(skb, sizeof(*req));
+
+	/* the response ids are mirrored from the request */
+	rsp.local_id = req->remote_id;
+	rsp.remote_id = req->local_id;
+	rsp.status = 0;
+	BT_DBG("local_id %d remote_id %d",
+		(int) rsp.local_id, (int) rsp.remote_id);
+	hdev = hci_dev_get(rsp.local_id);
+	if (!hdev) {
+		rsp.status = 1; /* Invalid Controller ID */
+		goto dpl_finished;
+	}
+	BT_DBG("hdev %p", hdev);
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+					&mgr->l2cap_conn->hcon->dst);
+	if (!conn) {
+		/* no link yet, but an accept still in progress counts */
+		aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
+		if (aplctx) {
+			kill_ctx(aplctx);
+			rsp.status = 0;
+			goto dpl_finished;
+		}
+		rsp.status = 2; /* No Physical Link exists */
+		goto dpl_finished;
+	}
+	BT_DBG("conn %p", conn);
+	hci_disconnect(conn, 0x13);
+
+dpl_finished:
+	send_a2mp_cmd(mgr, hdr->ident,
+			A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
+	if (hdev)
+		hci_dev_put(hdev);
+	return 0;
+}
+
+/* Dispatch an event to a context's type-specific handler and either
+ * re-arm the response timer (handler not finished) or destroy the
+ * context (handler finished).  Returns the handler's finished flag.
+ */
+static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+	struct amp_mgr *mgr = ctx->mgr;
+	u8 done = 0;
+
+	if (!mgr->connected)
+		return 0;
+
+	switch (ctx->type) {
+	case AMP_GETAMPASSOC:
+		done = getampassoc_handler(ctx, evt_type, data);
+		break;
+	case AMP_CREATEPHYSLINK:
+		done = createphyslink_handler(ctx, evt_type, data);
+		break;
+	case AMP_ACCEPTPHYSLINK:
+		done = acceptphyslink_handler(ctx, evt_type, data);
+		break;
+	}
+
+	if (done)
+		destroy_ctx(ctx);
+	else
+		mod_timer(&(ctx->timer), jiffies +
+			msecs_to_jiffies(A2MP_RSP_TIMEOUT));
+	return done;
+}
+
+/* Ask a context to abort cleanly via its handler. */
+static int cancel_ctx(struct amp_ctx *ctx)
+{
+	return execute_ctx(ctx, AMP_CANCEL, NULL);
+}
+
+/* Force a context to finish immediately (e.g. on timeout). */
+static int kill_ctx(struct amp_ctx *ctx)
+{
+	return execute_ctx(ctx, AMP_KILLED, NULL);
+}
+
+/* Workqueue side of ctx_timeout(): kill the expired context. */
+static void ctx_timeout_worker(struct work_struct *w)
+{
+	struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
+
+	kill_ctx(work->ctx);
+	kfree(work);
+}
+
+/* Timer callback: defer the context kill to the amp workqueue since
+ * this runs in (soft)irq context.
+ */
+static void ctx_timeout(unsigned long data)
+{
+	struct amp_ctx *ctx = (struct amp_ctx *) data;
+	struct amp_work_ctx_timeout *work;
+
+	BT_DBG("ctx %p", ctx);
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
+	work->ctx = ctx;
+	if (!queue_work(amp_workqueue, (struct work_struct *) work))
+		kfree(work);
+}
+
+/* Start executing the oldest queued context on this manager, if any. */
+static void launch_ctx(struct amp_mgr *mgr)
+{
+	struct amp_ctx *first = NULL;
+
+	BT_DBG("mgr %p", mgr);
+	read_lock(&mgr->ctx_list_lock);
+	if (!list_empty(&mgr->ctx_list))
+		first = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
+	read_unlock(&mgr->ctx_list_lock);
+	BT_DBG("ctx %p", first);
+	if (first)
+		execute_ctx(first, AMP_INIT, NULL);
+}
+
+/* Route an A2MP response to the context waiting on its identifier, or
+ * consume the frame when no such context exists.
+ */
+static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+	u16 hdr_len = le16_to_cpu(hdr->len);
+	struct amp_ctx *ctx;
+
+	/* find context waiting for A2MP rsp with this rsp's identifier */
+	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+	ctx = get_ctx_a2mp(mgr, hdr->ident);
+	if (!ctx) {
+		/* nobody is waiting on this ident; discard the payload,
+		 * clamped to what is actually present in the frame
+		 */
+		BT_DBG("context not found");
+		skb_pull(skb, sizeof(*hdr));
+		skb_pull(skb, min_t(u16, hdr_len, skb->len));
+		return 0;
+	}
+	execute_ctx(ctx, AMP_A2MP_RSP, skb);
+	return 0;
+}
+
+/* L2CAP-A2MP interface */
+
+/* Parse one received A2MP frame: iterate over the commands it carries,
+ * dispatch each to its handler, and send a Command Reject if a command
+ * is malformed or unknown.
+ */
+static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
+{
+	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+	int len;
+	int err = 0;
+	struct amp_mgr *mgr;
+
+	mgr = get_amp_mgr_sk(sk);
+	if (!mgr)
+		goto a2mp_finished;
+
+	len = skb->len;
+	while (len >= sizeof(*hdr)) {
+		struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+		u16 clen = le16_to_cpu(hdr->len);
+
+		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
+		/* a command occupies sizeof(*hdr) + clen bytes; the old
+		 * "clen > len" test let a payload length run past the end
+		 * of the frame, so handlers could pull beyond skb->len
+		 */
+		if (clen + sizeof(*hdr) > len || !hdr->ident) {
+			err = -EINVAL;
+			break;
+		}
+		switch (hdr->code) {
+		case A2MP_COMMAND_REJ:
+			command_rej(mgr, skb);
+			break;
+		case A2MP_DISCOVER_REQ:
+			err = discover_req(mgr, skb);
+			break;
+		case A2MP_CHANGE_NOTIFY:
+			err = change_notify(mgr, skb);
+			break;
+		case A2MP_GETINFO_REQ:
+			err = getinfo_req(mgr, skb);
+			break;
+		case A2MP_GETAMPASSOC_REQ:
+			err = getampassoc_req(mgr, skb);
+			break;
+		case A2MP_CREATEPHYSLINK_REQ:
+			err = createphyslink_req(mgr, skb);
+			break;
+		case A2MP_DISCONNPHYSLINK_REQ:
+			err = disconnphyslink_req(mgr, skb);
+			break;
+		case A2MP_CHANGE_RSP:
+		case A2MP_DISCOVER_RSP:
+		case A2MP_GETINFO_RSP:
+		case A2MP_GETAMPASSOC_RSP:
+		case A2MP_CREATEPHYSLINK_RSP:
+		case A2MP_DISCONNPHYSLINK_RSP:
+			err = a2mp_rsp(mgr, skb);
+			break;
+		default:
+			BT_ERR("Unknown A2MP signaling command 0x%2.2x",
+				hdr->code);
+			skb_pull(skb, sizeof(*hdr));
+			err = -EINVAL;
+			break;
+		}
+		len = skb->len;
+	}
+
+a2mp_finished:
+	/* NOTE(review): hdr still points at the start of the original
+	 * frame, so the reject carries the FIRST command's ident even when
+	 * a later command in the same frame failed - confirm intended.
+	 */
+	if (err && mgr) {
+		struct a2mp_cmd_rej rej;
+		rej.reason = cpu_to_le16(0);
+		send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
+				sizeof(rej), &rej);
+	}
+}
+
+/* L2CAP-A2MP interface */
+
+static int send_a2mp(struct socket *sock, u8 *data, int len)
+{
+ struct kvec iv = { data, len };
+ struct msghdr msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ return kernel_sendmsg(sock, &msg, &iv, 1, len);
+}
+
+/* Drain the A2MP socket's receive queue on the amp workqueue. */
+static void data_ready_worker(struct work_struct *w)
+{
+	struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
+	struct sock *sk = work->sk;
+	struct sk_buff *frame;
+
+	/* skb_dequeue() is thread-safe */
+	frame = skb_dequeue(&sk->sk_receive_queue);
+	while (frame) {
+		a2mp_receive(sk, frame);
+		kfree_skb(frame);
+		frame = skb_dequeue(&sk->sk_receive_queue);
+	}
+	sock_put(sk);
+	kfree(work);
+}
+
+/* sk_data_ready callback: defer frame processing to the workqueue,
+ * holding a socket reference across the deferral.
+ */
+static void data_ready(struct sock *sk, int bytes)
+{
+	struct amp_work_data_ready *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, data_ready_worker);
+	sock_hold(sk);
+	work->sk = sk;
+	work->bytes = bytes;
+	if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+		sock_put(sk);
+		kfree(work);
+	}
+}
+
+/* Workqueue handler for A2MP socket state transitions; queued by
+ * state_change() with a held socket reference, released here.
+ */
+static void state_change_worker(struct work_struct *w)
+{
+	struct amp_work_state_change *work = (struct amp_work_state_change *) w;
+	struct amp_mgr *mgr;
+	switch (work->sk->sk_state) {
+	case BT_CONNECTED:
+		/* socket is up */
+		BT_DBG("CONNECTED");
+		mgr = get_amp_mgr_sk(work->sk);
+		if (mgr) {
+			mgr->connected = 1;
+			/* deliver any frame that arrived before the
+			 * channel finished connecting
+			 */
+			if (mgr->skb) {
+				l2cap_recv_deferred_frame(work->sk, mgr->skb);
+				mgr->skb = NULL;
+			}
+			/* kick off the first queued context, if any */
+			launch_ctx(mgr);
+		}
+		break;
+
+	case BT_CLOSED:
+		/* connection is gone */
+		BT_DBG("CLOSED");
+		mgr = get_amp_mgr_sk(work->sk);
+		if (mgr) {
+			/* NOTE(review): the socket is released only when not
+			 * already SOCK_DEAD, but the pointer is cleared
+			 * unconditionally - confirm this matches the
+			 * ownership rules in remove_amp_mgr()
+			 */
+			if (!sock_flag(work->sk, SOCK_DEAD))
+				sock_release(mgr->a2mp_sock);
+			mgr->a2mp_sock = NULL;
+			remove_amp_mgr(mgr);
+		}
+		break;
+
+	default:
+		/* something else happened */
+		break;
+	}
+	sock_put(work->sk);
+	kfree(work);
+}
+
+/* sk_state_change callback: defer handling to the workqueue, holding a
+ * socket reference across the deferral.
+ */
+static void state_change(struct sock *sk)
+{
+	struct amp_work_state_change *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, state_change_worker);
+	sock_hold(sk);
+	work->sk = sk;
+	if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+		sock_put(sk);
+		kfree(work);
+	}
+}
+
+/* Create, bind and (non-blocking) connect a kernel L2CAP socket on the
+ * A2MP fixed channel (L2CAP_CID_A2MP) from src to dst, installing the
+ * data_ready/state_change callbacks so traffic is handled on the amp
+ * workqueue.  Returns the socket or NULL on error; -EINPROGRESS from
+ * kernel_connect() counts as success because the connect completes
+ * asynchronously via state_change().
+ */
+static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
+{
+	int err;
+	struct socket *sock;
+	struct sockaddr_l2 addr;
+	struct sock *sk;
+	/* positional initializer for struct l2cap_options - NOTE(review):
+	 * confirm the field order matches this kernel's l2cap_options
+	 * (intended: ERTM mode with A2MP default MTUs)
+	 */
+	struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
+		L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
+		L2CAP_MODE_ERTM, 1, 0xFF, 1};
+
+
+	err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
+				BTPROTO_L2CAP, &sock);
+
+	if (err) {
+		BT_ERR("sock_create_kern failed %d", err);
+		return NULL;
+	}
+
+	sk = sock->sk;
+	sk->sk_data_ready = data_ready;
+	sk->sk_state_change = state_change;
+
+	memset(&addr, 0, sizeof(addr));
+	bacpy(&addr.l2_bdaddr, src);
+	addr.l2_family = AF_BLUETOOTH;
+	addr.l2_cid = L2CAP_CID_A2MP;
+	err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
+	if (err) {
+		BT_ERR("kernel_bind failed %d", err);
+		sock_release(sock);
+		return NULL;
+	}
+
+	l2cap_fixed_channel_config(sk, &opts);
+
+	memset(&addr, 0, sizeof(addr));
+	bacpy(&addr.l2_bdaddr, dst);
+	addr.l2_family = AF_BLUETOOTH;
+	addr.l2_cid = L2CAP_CID_A2MP;
+	err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
+				O_NONBLOCK);
+	if ((err == 0) || (err == -EINPROGRESS))
+		return sock;
+	else {
+		BT_ERR("kernel_connect failed %d", err);
+		sock_release(sock);
+		return NULL;
+	}
+}
+
+/* Workqueue side of amp_conn_ind(): look up / create the AMP manager
+ * for this ACL connection and drop the held connection reference.
+ */
+static void conn_ind_worker(struct work_struct *w)
+{
+	struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
+	struct amp_mgr *mgr = get_create_amp_mgr(work->hcon, work->skb);
+
+	BT_DBG("mgr %p", mgr);
+	hci_conn_put(work->hcon);
+	kfree(work);
+}
+
+/* Workqueue side of amp_create_physical(). */
+static void create_physical_worker(struct work_struct *w)
+{
+	struct amp_work_create_physical *work =
+		(struct amp_work_create_physical *) w;
+	struct sock *sk = work->sk;
+
+	create_physical(work->conn, sk);
+	sock_put(sk);
+	kfree(work);
+}
+
+/* Workqueue side of amp_accept_physical(). */
+static void accept_physical_worker(struct work_struct *w)
+{
+	struct amp_work_accept_physical *work =
+		(struct amp_work_accept_physical *) w;
+	struct sock *sk = work->sk;
+
+	accept_physical(work->conn, work->id, sk);
+	sock_put(sk);
+	kfree(work);
+}
+
+/* L2CAP Fixed Channel interface */
+
+/* L2CAP fixed-channel entry point: an A2MP connection indication.
+ * Defers manager creation to the workqueue with a held hci_conn ref.
+ */
+void amp_conn_ind(struct hci_conn *hcon, struct sk_buff *skb)
+{
+	struct amp_work_conn_ind *work;
+
+	BT_DBG("hcon %p, skb %p", hcon, skb);
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, conn_ind_worker);
+	hci_conn_hold(hcon);
+	work->hcon = hcon;
+	work->skb = skb;
+	if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+		hci_conn_put(hcon);
+		kfree(work);
+	}
+}
+
+/* L2CAP Physical Link interface */
+
+/* L2CAP entry point: request creation of an AMP physical link for this
+ * connection; runs asynchronously on the amp workqueue.
+ */
+void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
+{
+	struct amp_work_create_physical *work;
+
+	BT_DBG("conn %p", conn);
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, create_physical_worker);
+	work->conn = conn;
+	work->sk = sk;
+	sock_hold(sk);
+	if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+		sock_put(sk);
+		kfree(work);
+	}
+}
+
+/* L2CAP entry point: accept a remotely initiated AMP physical link on
+ * controller `id`; runs asynchronously on the amp workqueue.
+ */
+void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
+{
+	struct amp_work_accept_physical *work;
+
+	BT_DBG("conn %p", conn);
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, accept_physical_worker);
+	work->conn = conn;
+	work->sk = sk;
+	work->id = id;
+	sock_hold(sk);
+	if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+		sock_put(sk);
+		kfree(work);
+	}
+}
+
+/* HCI interface */
+
+/* Deliver an HCI command-complete event to the context waiting on that
+ * opcode, then release the cloned skb.
+ */
+static void amp_cmd_cmplt_worker(struct work_struct *w)
+{
+	struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
+	struct amp_ctx *waiter;
+
+	waiter = get_ctx_hdev(work->hdev, AMP_HCI_CMD_CMPLT, work->opcode);
+	if (waiter)
+		execute_ctx(waiter, AMP_HCI_CMD_CMPLT, work->skb);
+	kfree_skb(work->skb);
+	kfree(w);
+}
+
+/* HCI callback: clone the command-complete skb and hand it to the amp
+ * workqueue.  The clone is owned by the work item once queued; on any
+ * failure it must be freed here.
+ */
+static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
+				struct sk_buff *skb)
+{
+	struct amp_work_cmd_cmplt *work;
+	struct sk_buff *skbc;
+	BT_DBG("hdev %p opcode 0x%x skb %p len %d",
+		hdev, opcode, skb, skb->len);
+	skbc = skb_clone(skb, GFP_ATOMIC);
+	if (!skbc)
+		return;
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		/* the clone used to leak on this path */
+		kfree_skb(skbc);
+		return;
+	}
+	INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
+	work->hdev = hdev;
+	work->opcode = opcode;
+	work->skb = skbc;
+	if (queue_work(amp_workqueue, (struct work_struct *) work) == 0) {
+		/* not queued: free both the work item and the clone */
+		kfree(work);
+		kfree_skb(skbc);
+	}
+}
+
+/* Deliver an HCI command-status event to the context waiting on that
+ * opcode.
+ */
+static void amp_cmd_status_worker(struct work_struct *w)
+{
+	struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
+	u8 status = work->status;
+	struct amp_ctx *waiter;
+
+	waiter = get_ctx_hdev(work->hdev, AMP_HCI_CMD_STATUS, work->opcode);
+	if (waiter)
+		execute_ctx(waiter, AMP_HCI_CMD_STATUS, &status);
+	kfree(w);
+}
+
+/* HCI callback: queue a command-status event for the amp workqueue. */
+static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
+{
+	struct amp_work_cmd_status *work;
+
+	BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+	INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
+	work->hdev = hdev;
+	work->opcode = opcode;
+	work->status = status;
+	if (!queue_work(amp_workqueue, (struct work_struct *) work))
+		kfree(work);
+}
+
+/* Workqueue handler for AMP-related HCI events.  AMP status changes
+ * update the local hdev and broadcast an A2MP change notification; all
+ * other events are routed to the context waiting on that event code.
+ * Always releases the cloned skb and the work item.
+ */
+static void amp_event_worker(struct work_struct *w)
+{
+	struct amp_work_event *work = (struct amp_work_event *) w;
+	struct hci_dev *hdev = work->hdev;
+	u8 event = work->event;
+	struct sk_buff *skb = work->skb;
+	struct amp_ctx *ctx;
+
+	if (event == HCI_EV_AMP_STATUS_CHANGE) {
+		struct hci_ev_amp_status_change *ev;
+		if (skb->len < sizeof(*ev))
+			goto amp_event_finished;
+		ev = (void *) skb->data;
+		if (ev->status != 0)
+			goto amp_event_finished;
+		/* only notify peers on an actual change */
+		if (ev->amp_status == hdev->amp_status)
+			goto amp_event_finished;
+		hdev->amp_status = ev->amp_status;
+		send_a2mp_change_notify();
+		goto amp_event_finished;
+	}
+	ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
+	if (ctx)
+		execute_ctx(ctx, AMP_HCI_EVENT, skb);
+
+amp_event_finished:
+	kfree_skb(skb);
+	kfree(w);
+}
+
+/* HCI callback: clone an AMP-related HCI event skb and hand it to the
+ * amp workqueue.  The clone is owned by the work item once queued; on
+ * any failure it must be freed here.
+ */
+static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
+{
+	struct amp_work_event *work;
+	struct sk_buff *skbc;
+	BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
+	skbc = skb_clone(skb, GFP_ATOMIC);
+	if (!skbc)
+		return;
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		/* the clone used to leak on this path */
+		kfree_skb(skbc);
+		return;
+	}
+	INIT_WORK((struct work_struct *) work, amp_event_worker);
+	work->hdev = hdev;
+	work->event = event;
+	work->skb = skbc;
+	if (queue_work(amp_workqueue, (struct work_struct *) work) == 0) {
+		/* not queued: free both the work item and the clone */
+		kfree(work);
+		kfree_skb(skbc);
+	}
+}
+
+/* Workqueue side of amp_dev_event(): broadcast an A2MP change
+ * notification after a controller came or went.
+ */
+static void amp_dev_event_worker(struct work_struct *w)
+{
+	send_a2mp_change_notify();
+	kfree(w);
+}
+
+/* hci_dev notifier: on AMP controller register/unregister/up/down,
+ * schedule an A2MP change notification.  BR/EDR-only controllers are
+ * ignored.
+ */
+static int amp_dev_event(struct notifier_block *this, unsigned long event,
+			void *ptr)
+{
+	struct hci_dev *hdev = (struct hci_dev *) ptr;
+	struct amp_work_event *work;
+
+	if (hdev->amp_type == HCI_BREDR)
+		return NOTIFY_DONE;
+
+	if (event == HCI_DEV_UNREG || event == HCI_DEV_REG ||
+	    event == HCI_DEV_UP || event == HCI_DEV_DOWN) {
+		BT_DBG("hdev %p event %ld", hdev, event);
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (work) {
+			INIT_WORK((struct work_struct *) work,
+					amp_dev_event_worker);
+			if (!queue_work(amp_workqueue,
+					(struct work_struct *) work))
+				kfree(work);
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+
+/* L2CAP module init continued */
+
+/* hci_dev register/up/down/unregister notifications -> amp_dev_event */
+static struct notifier_block amp_notifier = {
+	.notifier_call = amp_dev_event
+};
+
+/* Callbacks the HCI core invokes for AMP command/event traffic. */
+static struct amp_mgr_cb hci_amp = {
+	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
+	.amp_cmd_status_event = amp_cmd_status_evt,
+	.amp_event = amp_evt
+};
+
+/* Register the AMP hooks with the HCI core and create the single-
+ * threaded workqueue that serializes all A2MP/AMP work.  Returns 0 on
+ * success, -EPERM if the workqueue cannot be created (registrations
+ * are unwound so a failed init leaves no dangling callbacks).
+ */
+int amp_init(void)
+{
+	hci_register_amp(&hci_amp);
+	/* was "&_notifier", which is not declared anywhere in this file;
+	 * the notifier block defined above is amp_notifier
+	 */
+	hci_register_notifier(&amp_notifier);
+	amp_next_handle = 1;
+	amp_workqueue = create_singlethread_workqueue("a2mp");
+	if (!amp_workqueue) {
+		hci_unregister_notifier(&amp_notifier);
+		hci_unregister_amp(&hci_amp);
+		return -EPERM;
+	}
+	return 0;
+}
+
+/* Unregister the HCI hooks, then drain and destroy the workqueue so no
+ * work item can run after module teardown.
+ */
+void amp_exit(void)
+{
+	hci_unregister_amp(&hci_amp);
+	/* was "&_notifier"; must match the block registered in amp_init */
+	hci_unregister_notifier(&amp_notifier);
+	flush_workqueue(amp_workqueue);
+	destroy_workqueue(amp_workqueue);
+}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a779ec7..f504921 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -56,8 +57,8 @@
#define VERSION "1.3"
-static bool compress_src = true;
-static bool compress_dst = true;
+static bool compress_src = 1;
+static bool compress_dst = 1;
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
@@ -65,24 +66,31 @@
static struct bnep_session *__bnep_get_session(u8 *dst)
{
struct bnep_session *s;
+ struct list_head *p;
BT_DBG("");
- list_for_each_entry(s, &bnep_session_list, list)
+ list_for_each(p, &bnep_session_list) {
+ s = list_entry(p, struct bnep_session, list);
if (!compare_ether_addr(dst, s->eh.h_source))
return s;
-
+ }
return NULL;
}
static void __bnep_link_session(struct bnep_session *s)
{
+ /* It's safe to call __module_get() here because sessions are added
+ by the socket layer which has to hold the reference to this module.
+ */
+ __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
+ module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -502,7 +510,7 @@
schedule();
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@@ -523,7 +531,6 @@
up_write(&bnep_session_sem);
free_netdev(dev);
- module_put_and_exit(0);
return 0;
}
@@ -610,11 +617,9 @@
__bnep_link_session(s);
- __module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
- module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
@@ -663,14 +668,17 @@
int bnep_get_connlist(struct bnep_connlist_req *req)
{
- struct bnep_session *s;
+ struct list_head *p;
int err = 0, n = 0;
down_read(&bnep_session_sem);
- list_for_each_entry(s, &bnep_session_list, list) {
+ list_for_each(p, &bnep_session_list) {
+ struct bnep_session *s;
struct bnep_conninfo ci;
+ s = list_entry(p, struct bnep_session, list);
+
__bnep_copy_ci(&ci, s);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc40864..155ff74 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/socket.h>
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc4..17800b1 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -42,6 +42,7 @@
#include <linux/uaccess.h>
#include <net/sock.h>
+#include <asm/system.h>
#include "bnep.h"
@@ -142,10 +143,10 @@
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
- u32 uci;
+ uint32_t uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
+ if (get_user(cl.cnum, (uint32_t __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -156,7 +157,7 @@
err = bnep_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
err = -EFAULT;
return err;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 50f0d13..744233c 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@
{
struct capi_ctr *ctrl = &session->ctrl;
struct cmtp_application *application;
- __u16 appl;
+ __u16 cmd, appl;
__u32 contr;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,6 +344,7 @@
return;
}
+ cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
appl = CAPIMSG_APPID(skb->data);
contr = CAPIMSG_CONTROL(skb->data);
@@ -386,8 +387,7 @@
capi_ctr_down(ctrl);
- atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ kthread_stop(session->task);
}
static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index c32638d..db43b54 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -81,7 +81,6 @@
char name[BTNAMSIZ];
- atomic_t terminate;
struct task_struct *task;
wait_queue_head_t wait;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 6c9c1fd..bff02ad 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,24 +53,28 @@
static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
{
struct cmtp_session *session;
+ struct list_head *p;
BT_DBG("");
- list_for_each_entry(session, &cmtp_session_list, list)
+ list_for_each(p, &cmtp_session_list) {
+ session = list_entry(p, struct cmtp_session, list);
if (!bacmp(bdaddr, &session->bdaddr))
return session;
-
+ }
return NULL;
}
static void __cmtp_link_session(struct cmtp_session *session)
{
+ __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
+ module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -288,11 +292,9 @@
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (1) {
+ while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&session->terminate))
- break;
if (sk->sk_state != BT_CONNECTED)
break;
@@ -308,7 +310,7 @@
schedule();
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
@@ -323,7 +325,6 @@
up_write(&cmtp_session_sem);
kfree(session);
- module_put_and_exit(0);
return 0;
}
@@ -348,8 +349,7 @@
bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
- session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
- l2cap_pi(sock->sk)->chan->imtu);
+ session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu);
BT_DBG("mtu %d", session->mtu);
@@ -373,28 +373,25 @@
__cmtp_link_session(session);
- __module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
if (IS_ERR(session->task)) {
- module_put(THIS_MODULE);
err = PTR_ERR(session->task);
goto unlink;
}
if (!(session->flags & (1 << CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
- if (err < 0) {
- atomic_inc(&session->terminate);
- wake_up_process(session->task);
- up_write(&cmtp_session_sem);
- return err;
- }
+ if (err < 0)
+ goto detach;
}
up_write(&cmtp_session_sem);
return 0;
+detach:
+ cmtp_detach_device(session);
+
unlink:
__cmtp_unlink_session(session);
@@ -419,8 +416,7 @@
skb_queue_purge(&session->transmit);
/* Stop session thread */
- atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ kthread_stop(session->task);
} else
err = -ENOENT;
@@ -430,16 +426,19 @@
int cmtp_get_connlist(struct cmtp_connlist_req *req)
{
- struct cmtp_session *session;
+ struct list_head *p;
int err = 0, n = 0;
BT_DBG("");
down_read(&cmtp_session_sem);
- list_for_each_entry(session, &cmtp_session_list, list) {
+ list_for_each(p, &cmtp_session_list) {
+ struct cmtp_session *session;
struct cmtp_conninfo ci;
+ session = list_entry(p, struct cmtp_session, list);
+
__cmtp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 311668d..3f2dd5c 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -39,6 +39,7 @@
#include <linux/isdn/capilli.h>
+#include <asm/system.h>
#include "cmtp.h"
@@ -136,10 +137,10 @@
{
if (cmd == CMTPGETCONNLIST) {
struct cmtp_connlist_req cl;
- u32 uci;
+ uint32_t uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
+ if (get_user(cl.cnum, (uint32_t __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -150,7 +151,7 @@
err = cmtp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
err = -EFAULT;
return err;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a5f93a9..02ea082 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+ Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -35,37 +35,86 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
-static void hci_le_connect(struct hci_conn *conn)
+struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type,
+ bdaddr_t *dst, __u8 sec_level, __u8 auth_type,
+ struct bt_le_params *le_params)
{
- struct hci_dev *hdev = conn->hdev;
+ struct hci_conn *le;
struct hci_cp_le_create_conn cp;
+ struct adv_entry *entry;
+ struct link_key *key;
- conn->state = BT_CONNECT;
- conn->out = true;
- conn->link_mode |= HCI_LM_MASTER;
- conn->sec_level = BT_SECURITY_LOW;
+ BT_DBG("%p", hdev);
+
+ le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (le) {
+ hci_conn_hold(le);
+ return le;
+ }
+
+ key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK);
+ if (!key) {
+ entry = hci_find_adv_entry(hdev, dst);
+ if (entry)
+ le = hci_le_conn_add(hdev, dst,
+ entry->bdaddr_type);
+ else
+ le = hci_le_conn_add(hdev, dst, 0);
+ } else {
+ le = hci_le_conn_add(hdev, dst, key->addr_type);
+ }
+
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+
+ hci_conn_hold(le);
+
+ le->state = BT_CONNECT;
+ le->out = 1;
+ le->link_mode |= HCI_LM_MASTER;
+ le->sec_level = BT_SECURITY_LOW;
+ le->type = LE_LINK;
memset(&cp, 0, sizeof(cp));
- cp.scan_interval = cpu_to_le16(0x0060);
- cp.scan_window = cpu_to_le16(0x0030);
- bacpy(&cp.peer_addr, &conn->dst);
- cp.peer_addr_type = conn->dst_type;
- cp.conn_interval_min = cpu_to_le16(0x0028);
- cp.conn_interval_max = cpu_to_le16(0x0038);
- cp.supervision_timeout = cpu_to_le16(0x002a);
- cp.min_ce_len = cpu_to_le16(0x0000);
- cp.max_ce_len = cpu_to_le16(0x0000);
+ if (l2cap_sock_le_params_valid(le_params)) {
+ cp.supervision_timeout =
+ cpu_to_le16(le_params->supervision_timeout);
+ cp.scan_interval = cpu_to_le16(le_params->scan_interval);
+ cp.scan_window = cpu_to_le16(le_params->scan_window);
+ cp.conn_interval_min = cpu_to_le16(le_params->interval_min);
+ cp.conn_interval_max = cpu_to_le16(le_params->interval_max);
+ cp.conn_latency = cpu_to_le16(le_params->latency);
+ cp.min_ce_len = cpu_to_le16(le_params->min_ce_len);
+ cp.max_ce_len = cpu_to_le16(le_params->max_ce_len);
+ le->conn_timeout = le_params->conn_timeout;
+ } else {
+ cp.supervision_timeout = cpu_to_le16(BT_LE_SUP_TO_DEFAULT);
+ cp.scan_interval = cpu_to_le16(BT_LE_SCAN_INTERVAL_DEF);
+ cp.scan_window = cpu_to_le16(BT_LE_SCAN_WINDOW_DEF);
+ cp.conn_interval_min = cpu_to_le16(BT_LE_CONN_INTERVAL_MIN_DEF);
+ cp.conn_interval_max = cpu_to_le16(BT_LE_CONN_INTERVAL_MAX_DEF);
+ cp.conn_latency = cpu_to_le16(BT_LE_LATENCY_DEF);
+ le->conn_timeout = 5;
+ }
+ bacpy(&cp.peer_addr, &le->dst);
+ cp.peer_addr_type = le->dst_type;
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+
+ return le;
}
+EXPORT_SYMBOL(hci_le_connect);
static void hci_le_connect_cancel(struct hci_conn *conn)
{
@@ -78,10 +127,10 @@
struct inquiry_entry *ie;
struct hci_cp_create_conn cp;
- BT_DBG("hcon %p", conn);
+ BT_DBG("%p", conn);
conn->state = BT_CONNECT;
- conn->out = true;
+ conn->out = 1;
conn->link_mode = HCI_LM_MASTER;
@@ -103,8 +152,7 @@
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
- if (ie->data.ssp_mode > 0)
- set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+ conn->ssp_mode = ie->data.ssp_mode;
}
cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -122,7 +170,7 @@
BT_DBG("%p", conn);
- if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
+ if (conn->hdev->hci_ver < 2)
return;
bacpy(&cp.bdaddr, &conn->dst);
@@ -131,15 +179,22 @@
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
- struct hci_cp_disconnect cp;
-
BT_DBG("%p", conn);
conn->state = BT_DISCONN;
- cp.handle = cpu_to_le16(conn->handle);
- cp.reason = reason;
- hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
+ if (conn->hdev->dev_type == HCI_BREDR) {
+ struct hci_cp_disconnect cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
+ } else {
+ struct hci_cp_disconn_phys_link cp;
+ cp.phy_handle = (u8) conn->handle;
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHYS_LINK,
+ sizeof(cp), &cp);
+ }
}
void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -150,7 +205,7 @@
BT_DBG("%p", conn);
conn->state = BT_CONNECT;
- conn->out = true;
+ conn->out = 1;
conn->attempt++;
@@ -168,18 +223,28 @@
BT_DBG("%p", conn);
conn->state = BT_CONNECT;
- conn->out = true;
+ conn->out = 1;
conn->attempt++;
cp.handle = cpu_to_le16(handle);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
- cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ if (conn->hdev->is_wbs) {
+ /* Transparent Data */
+ uint16_t voice_setting = hdev->voice_setting | ACF_TRANS;
+ cp.max_latency = cpu_to_le16(0x000D);
+ cp.pkt_type = cpu_to_le16(ESCO_WBS);
+ cp.voice_setting = cpu_to_le16(voice_setting);
+ /* Retransmission Effort */
+ cp.retrans_effort = RE_LINK_QUALITY;
+ } else {
+ cp.max_latency = cpu_to_le16(0x000A);
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ cp.voice_setting = cpu_to_le16(hdev->voice_setting);
+ cp.retrans_effort = RE_POWER_CONSUMP;
+ }
hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
@@ -204,6 +269,18 @@
}
EXPORT_SYMBOL(hci_le_conn_update);
+void hci_read_rssi(struct hci_conn *conn)
+{
+ struct hci_cp_read_rssi cp;
+ struct hci_dev *hdev = conn->hdev;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+
+ hci_send_cmd(hdev, HCI_OP_READ_RSSI, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_read_rssi);
+
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
__u8 ltk[16])
{
@@ -274,16 +351,15 @@
}
}
-static void hci_conn_timeout(struct work_struct *work)
+static void hci_conn_timeout(unsigned long arg)
{
- struct hci_conn *conn = container_of(work, struct hci_conn,
- disc_work.work);
+ struct hci_conn *conn = (void *) arg;
+ struct hci_dev *hdev = conn->hdev;
__u8 reason;
- BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
+ BT_DBG("conn %p state %d", conn, conn->state);
- if (atomic_read(&conn->refcnt))
- return;
+ hci_dev_lock(hdev);
switch (conn->state) {
case BT_CONNECT:
@@ -297,49 +373,18 @@
break;
case BT_CONFIG:
case BT_CONNECTED:
- reason = hci_proto_disconn_ind(conn);
- hci_acl_disconn(conn, reason);
+ if (!atomic_read(&conn->refcnt)) {
+ reason = hci_proto_disconn_ind(conn);
+ hci_acl_disconn(conn, reason);
+ }
break;
default:
- conn->state = BT_CLOSED;
+ if (!atomic_read(&conn->refcnt))
+ conn->state = BT_CLOSED;
break;
}
-}
-/* Enter sniff mode */
-static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p mode %d", conn, conn->mode);
-
- if (test_bit(HCI_RAW, &hdev->flags))
- return;
-
- if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
- return;
-
- if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
- return;
-
- if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
- struct hci_cp_sniff_subrate cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
- hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
- }
-
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
- struct hci_cp_sniff_mode cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
- cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
- hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
- }
+ hci_dev_unlock(hdev);
}
static void hci_conn_idle(unsigned long arg)
@@ -351,13 +396,34 @@
hci_conn_enter_sniff_mode(conn);
}
-static void hci_conn_auto_accept(unsigned long arg)
+static void hci_conn_rssi_update(struct work_struct *work)
{
- struct hci_conn *conn = (void *) arg;
- struct hci_dev *hdev = conn->hdev;
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct hci_conn *conn =
+ container_of(delayed, struct hci_conn, rssi_update_work);
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
- &conn->dst);
+ BT_DBG("conn %p mode %d", conn, conn->mode);
+
+ hci_read_rssi(conn);
+}
+
+static void encryption_disabled_timeout(unsigned long userdata)
+{
+ struct hci_conn *conn = (struct hci_conn *)userdata;
+ BT_INFO("conn %p Grace Prd Exp ", conn);
+
+ hci_encrypt_cfm(conn, 0, 0);
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ struct hci_cp_set_conn_encrypt cp;
+ BT_INFO("HCI_CONN_ENCRYPT_PEND is set");
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.encrypt = 1;
+ hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ }
+
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
@@ -367,7 +433,7 @@
BT_DBG("%s dst %s", hdev->name, batostr(dst));
- conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
+ conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
if (!conn)
return NULL;
@@ -379,14 +445,15 @@
conn->auth_type = HCI_AT_GENERAL_BONDING;
conn->io_capability = hdev->io_capability;
conn->remote_auth = 0xff;
- conn->key_type = 0xff;
- set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ wake_lock_init(&conn->idle_lock, WAKE_LOCK_SUSPEND, "bt_idle");
switch (type) {
case ACL_LINK:
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
+ conn->link_policy = hdev->link_policy;
break;
case SCO_LINK:
if (!pkt_type)
@@ -410,17 +477,18 @@
skb_queue_head_init(&conn->data_q);
- INIT_LIST_HEAD(&conn->chan_list);
-
- INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
+ setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
- setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->rssi_update_work, hci_conn_rssi_update);
+ setup_timer(&conn->encrypt_pause_timer, encryption_disabled_timeout,
+ (unsigned long)conn);
atomic_set(&conn->refcnt, 0);
hci_dev_hold(hdev);
+ tasklet_disable(&hdev->tx_task);
+
hci_conn_hash_add(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
@@ -429,6 +497,20 @@
hci_conn_init_sysfs(conn);
+ tasklet_enable(&hdev->tx_task);
+
+ return conn;
+}
+
+struct hci_conn *hci_le_conn_add(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 addr_type)
+{
+ struct hci_conn *conn = hci_conn_add(hdev, LE_LINK, 0, dst);
+ if (!conn)
+ return NULL;
+
+ conn->dst_type = addr_type;
+
return conn;
}
@@ -438,11 +520,13 @@
BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
+ /* Make sure no timers are running */
del_timer(&conn->idle_timer);
-
- cancel_delayed_work_sync(&conn->disc_work);
-
- del_timer(&conn->auto_accept_timer);
+ wake_lock_destroy(&conn->idle_lock);
+ del_timer(&conn->disc_timer);
+ del_timer(&conn->smp_timer);
+ __cancel_delayed_work(&conn->rssi_update_work);
+ del_timer(&conn->encrypt_pause_timer);
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
@@ -464,35 +548,102 @@
}
}
-
- hci_chan_list_flush(conn);
+ tasklet_disable(&hdev->tx_task);
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+ tasklet_schedule(&hdev->tx_task);
+
+ tasklet_enable(&hdev->tx_task);
+
skb_queue_purge(&conn->data_q);
hci_conn_put_device(conn);
hci_dev_put(hdev);
- if (conn->handle == 0)
- kfree(conn);
+ return 0;
+}
+
+struct hci_chan *hci_chan_add(struct hci_dev *hdev)
+{
+ struct hci_chan *chan;
+
+ BT_DBG("%s", hdev->name);
+
+ chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
+
+ atomic_set(&chan->refcnt, 0);
+
+ hci_dev_hold(hdev);
+
+ chan->hdev = hdev;
+
+ list_add(&chan->list, &hdev->chan_list.list);
+
+ return chan;
+}
+EXPORT_SYMBOL(hci_chan_add);
+
+int hci_chan_del(struct hci_chan *chan)
+{
+ BT_DBG("%s chan %p", chan->hdev->name, chan);
+
+ list_del(&chan->list);
+
+ hci_conn_put(chan->conn);
+ hci_dev_put(chan->hdev);
+
+ kfree(chan);
return 0;
}
+int hci_chan_put(struct hci_chan *chan)
+{
+ struct hci_cp_disconn_logical_link cp;
+ struct hci_conn *hcon;
+ u16 ll_handle;
+
+ BT_DBG("chan %p refcnt %d", chan, atomic_read(&chan->refcnt));
+ if (!atomic_dec_and_test(&chan->refcnt))
+ return 0;
+
+ hcon = chan->conn;
+ ll_handle = chan->ll_handle;
+
+ hci_chan_del(chan);
+
+ BT_DBG("chan->conn->state %d", hcon->state);
+ if (hcon->state == BT_CONNECTED) {
+ cp.log_handle = cpu_to_le16(ll_handle);
+ hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK,
+ sizeof(cp), &cp);
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(hci_chan_put);
+
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
- struct hci_dev *hdev = NULL, *d;
+ struct hci_dev *hdev = NULL;
+ struct list_head *p;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
- read_lock(&hci_dev_list_lock);
+ read_lock_bh(&hci_dev_list_lock);
- list_for_each_entry(d, &hci_dev_list, list) {
+ list_for_each(p, &hci_dev_list) {
+ struct hci_dev *d = list_entry(p, struct hci_dev, list);
+
+ if (d->dev_type != HCI_BREDR)
+ continue;
if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
continue;
@@ -515,11 +666,63 @@
if (hdev)
hdev = hci_dev_hold(hdev);
- read_unlock(&hci_dev_list_lock);
+ read_unlock_bh(&hci_dev_list_lock);
return hdev;
}
EXPORT_SYMBOL(hci_get_route);
+struct hci_dev *hci_dev_get_type(u8 amp_type)
+{
+ struct hci_dev *hdev = NULL;
+ struct hci_dev *d;
+
+ BT_DBG("amp_type %d", amp_type);
+
+ read_lock_bh(&hci_dev_list_lock);
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if ((d->amp_type == amp_type) && test_bit(HCI_UP, &d->flags)) {
+ hdev = d;
+ break;
+ }
+ }
+
+ if (hdev)
+ hdev = hci_dev_hold(hdev);
+
+ read_unlock_bh(&hci_dev_list_lock);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_dev_get_type);
+
+struct hci_dev *hci_dev_get_amp(bdaddr_t *dst)
+{
+ struct hci_dev *d;
+ struct hci_dev *hdev = NULL;
+
+ BT_DBG("%s dst %s", hdev->name, batostr(dst));
+
+ read_lock_bh(&hci_dev_list_lock);
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ struct hci_conn *conn;
+ if (d->dev_type == HCI_BREDR)
+ continue;
+ conn = hci_conn_hash_lookup_ba(d, ACL_LINK, dst);
+ if (conn) {
+ hdev = d;
+ break;
+ }
+ }
+
+ if (hdev)
+ hdev = hci_dev_hold(hdev);
+
+ read_unlock_bh(&hci_dev_list_lock);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_dev_get_amp);
+
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
@@ -528,39 +731,18 @@
{
struct hci_conn *acl;
struct hci_conn *sco;
- struct hci_conn *le;
BT_DBG("%s dst %s", hdev->name, batostr(dst));
- if (type == LE_LINK) {
- struct adv_entry *entry;
-
- le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le)
- return ERR_PTR(-EBUSY);
-
- entry = hci_find_adv_entry(hdev, dst);
- if (!entry)
- return ERR_PTR(-EHOSTUNREACH);
-
- le = hci_conn_add(hdev, LE_LINK, 0, dst);
- if (!le)
- return ERR_PTR(-ENOMEM);
-
- le->dst_type = entry->bdaddr_type;
-
- hci_le_connect(le);
-
- hci_conn_hold(le);
-
- return le;
- }
+ if (type == LE_LINK)
+ return hci_le_connect(hdev, pkt_type, dst, sec_level,
+ auth_type, NULL);
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
if (!acl)
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
hci_conn_hold(acl);
@@ -580,7 +762,7 @@
sco = hci_conn_add(hdev, type, pkt_type, dst);
if (!sco) {
hci_conn_put(acl);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
}
@@ -591,12 +773,12 @@
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
- set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
- hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
+ acl->power_save = 1;
+ hci_conn_enter_active_mode(acl, 1);
- if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
+ if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
/* defer SCO setup until mode change completed */
- set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
+ set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
return sco;
}
@@ -607,12 +789,43 @@
}
EXPORT_SYMBOL(hci_connect);
+void hci_disconnect(struct hci_conn *conn, __u8 reason)
+{
+ BT_DBG("conn %p", conn);
+
+ hci_proto_disconn_cfm(conn, reason, 0);
+}
+EXPORT_SYMBOL(hci_disconnect);
+
+void hci_disconnect_amp(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_dev *hdev = NULL;
+
+ BT_DBG("conn %p", conn);
+
+ read_lock_bh(&hci_dev_list_lock);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ struct hci_conn *c;
+ if (hdev == conn->hdev)
+ continue;
+ if (hdev->amp_type == HCI_BREDR)
+ continue;
+ c = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &conn->dst);
+ if (c)
+ hci_disconnect(c, reason);
+ }
+
+ read_unlock_bh(&hci_dev_list_lock);
+}
+
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
+ if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
+ !(conn->link_mode & HCI_LM_ENCRYPT))
return 0;
return 1;
@@ -634,115 +847,71 @@
/* Make sure we preserve an existing MITM requirement*/
auth_type |= (conn->auth_type & 0x01);
-
conn->auth_type = auth_type;
+ conn->auth_initiator = 1;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_auth_requested cp;
/* encrypt must be pending if auth is also pending */
- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
- if (conn->key_type != 0xff)
- set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
}
return 0;
}
-/* Encrypt the the link */
-static void hci_conn_encrypt(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.encrypt = 0x01;
- hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
- }
-}
-
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %p %d %d", conn, sec_level, auth_type);
- /* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
return 1;
- /* For non 2.1 devices and low security level we don't need the link
- key. */
- if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
+ if (sec_level == BT_SECURITY_LOW &&
+ (!conn->ssp_mode || !conn->hdev->ssp_mode))
return 1;
- /* For other security levels we need the link key. */
- if (!(conn->link_mode & HCI_LM_AUTH))
- goto auth;
+ if (conn->type == LE_LINK) {
+ if (conn->pending_sec_level > sec_level)
+ sec_level = conn->pending_sec_level;
- /* An authenticated combination key has sufficient security for any
- security level. */
- if (conn->key_type == HCI_LK_AUTH_COMBINATION)
- goto encrypt;
-
- /* An unauthenticated combination key has sufficient security for
- security level 1 and 2. */
- if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
- (sec_level == BT_SECURITY_MEDIUM ||
- sec_level == BT_SECURITY_LOW))
- goto encrypt;
-
- /* A combination key has always sufficient security for the security
- levels 1 or 2. High security level requires the combination key
- is generated using maximum PIN code length (16).
- For pre 2.1 units. */
- if (conn->key_type == HCI_LK_COMBINATION &&
- (sec_level != BT_SECURITY_HIGH ||
- conn->pin_length == 16))
- goto encrypt;
-
-auth:
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+ if (sec_level > conn->sec_level)
+ conn->pending_sec_level = sec_level;
+ hci_proto_connect_cfm(conn, 0);
return 0;
-
- if (!hci_conn_auth(conn, sec_level, auth_type))
+ } else if (conn->link_mode & HCI_LM_ENCRYPT) {
+ return hci_conn_auth(conn, sec_level, auth_type);
+ } else if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
return 0;
+ }
-encrypt:
- if (conn->link_mode & HCI_LM_ENCRYPT)
- return 1;
+ if (hci_conn_auth(conn, sec_level, auth_type)) {
+ struct hci_cp_set_conn_encrypt cp;
+ if (timer_pending(&conn->encrypt_pause_timer)) {
+ BT_INFO("encrypt_pause_timer is pending");
+ return 0;
+ }
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.encrypt = 1;
+ hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ }
- hci_conn_encrypt(conn);
return 0;
}
EXPORT_SYMBOL(hci_conn_security);
-/* Check secure link requirement */
-int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
-{
- BT_DBG("conn %p", conn);
-
- if (sec_level != BT_SECURITY_HIGH)
- return 1; /* Accept if non-secure is required */
-
- if (conn->sec_level == BT_SECURITY_HIGH)
- return 1;
-
- return 0; /* Reject not secure link */
-}
-EXPORT_SYMBOL(hci_conn_check_secure);
-
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
@@ -761,7 +930,7 @@
if (!role && conn->link_mode & HCI_LM_MASTER)
return 1;
- if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
+ if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
struct hci_cp_switch_role cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.role = role;
@@ -785,33 +954,174 @@
if (conn->mode != HCI_CM_SNIFF)
goto timer;
- if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
+ if (!conn->power_save && !force_active)
goto timer;
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
struct hci_cp_exit_sniff_mode cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
}
timer:
- if (hdev->idle_timeout > 0)
+ if (hdev->idle_timeout > 0) {
mod_timer(&conn->idle_timer,
jiffies + msecs_to_jiffies(hdev->idle_timeout));
+ wake_lock(&conn->idle_lock);
+ }
}
+static inline void hci_conn_stop_rssi_timer(struct hci_conn *conn)
+{
+ BT_DBG("conn %p", conn);
+ cancel_delayed_work(&conn->rssi_update_work);
+}
+
+static inline void hci_conn_start_rssi_timer(struct hci_conn *conn,
+ u16 interval)
+{
+ struct hci_dev *hdev = conn->hdev;
+ BT_DBG("conn %p, pending %d", conn,
+ delayed_work_pending(&conn->rssi_update_work));
+ if (!delayed_work_pending(&conn->rssi_update_work)) {
+ queue_delayed_work(hdev->workqueue, &conn->rssi_update_work,
+ msecs_to_jiffies(interval));
+ }
+}
+
+void hci_conn_set_rssi_reporter(struct hci_conn *conn,
+ s8 rssi_threshold, u16 interval, u8 updateOnThreshExceed)
+{
+ if (conn) {
+ conn->rssi_threshold = rssi_threshold;
+ conn->rssi_update_interval = interval;
+ conn->rssi_update_thresh_exceed = updateOnThreshExceed;
+ hci_conn_start_rssi_timer(conn, interval);
+ }
+}
+
+void hci_conn_unset_rssi_reporter(struct hci_conn *conn)
+{
+ if (conn) {
+ BT_DBG("Deleting the rssi_update_timer");
+ hci_conn_stop_rssi_timer(conn);
+ }
+}
+
+/* Enter sniff mode */
+void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p mode %d", conn, conn->mode);
+
+ if (test_bit(HCI_RAW, &hdev->flags))
+ return;
+
+ if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+ return;
+
+ if (conn->mode != HCI_CM_ACTIVE ||
+ !(conn->link_policy & HCI_LP_SNIFF) ||
+ (hci_find_link_key(hdev, &conn->dst) == NULL))
+ return;
+
+ if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+ struct hci_cp_sniff_subrate cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_latency = cpu_to_le16(0);
+ cp.min_remote_timeout = cpu_to_le16(0);
+ cp.min_local_timeout = cpu_to_le16(0);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+ }
+
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ struct hci_cp_sniff_mode cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+ cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+ cp.attempt = cpu_to_le16(4);
+ cp.timeout = cpu_to_le16(1);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+ }
+}
+
+struct hci_chan *hci_chan_create(struct hci_chan *chan,
+ struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
+{
+ struct hci_cp_create_logical_link cp;
+
+ chan->state = BT_CONNECT;
+ chan->tx_fs = *tx_fs;
+ chan->rx_fs = *rx_fs;
+ cp.phy_handle = chan->conn->handle;
+ cp.tx_fs.id = chan->tx_fs.id;
+ cp.tx_fs.type = chan->tx_fs.type;
+ cp.tx_fs.max_sdu = cpu_to_le16(chan->tx_fs.max_sdu);
+ cp.tx_fs.sdu_arr_time = cpu_to_le32(chan->tx_fs.sdu_arr_time);
+ cp.tx_fs.acc_latency = cpu_to_le32(chan->tx_fs.acc_latency);
+ cp.tx_fs.flush_to = cpu_to_le32(chan->tx_fs.flush_to);
+ cp.rx_fs.id = chan->rx_fs.id;
+ cp.rx_fs.type = chan->rx_fs.type;
+ cp.rx_fs.max_sdu = cpu_to_le16(chan->rx_fs.max_sdu);
+ cp.rx_fs.sdu_arr_time = cpu_to_le32(chan->rx_fs.sdu_arr_time);
+ cp.rx_fs.acc_latency = cpu_to_le32(chan->rx_fs.acc_latency);
+ cp.rx_fs.flush_to = cpu_to_le32(chan->rx_fs.flush_to);
+ hci_conn_hold(chan->conn);
+ if (chan->conn->out)
+ hci_send_cmd(chan->conn->hdev, HCI_OP_CREATE_LOGICAL_LINK,
+ sizeof(cp), &cp);
+ else
+ hci_send_cmd(chan->conn->hdev, HCI_OP_ACCEPT_LOGICAL_LINK,
+ sizeof(cp), &cp);
+ return chan;
+}
+EXPORT_SYMBOL(hci_chan_create);
+
+void hci_chan_modify(struct hci_chan *chan,
+ struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
+{
+ struct hci_cp_flow_spec_modify cp;
+
+ chan->tx_fs = *tx_fs;
+ chan->rx_fs = *rx_fs;
+ cp.log_handle = cpu_to_le16(chan->ll_handle);
+ cp.tx_fs.id = tx_fs->id;
+ cp.tx_fs.type = tx_fs->type;
+ cp.tx_fs.max_sdu = cpu_to_le16(tx_fs->max_sdu);
+ cp.tx_fs.sdu_arr_time = cpu_to_le32(tx_fs->sdu_arr_time);
+ cp.tx_fs.acc_latency = cpu_to_le32(tx_fs->acc_latency);
+ cp.tx_fs.flush_to = cpu_to_le32(tx_fs->flush_to);
+ cp.rx_fs.id = rx_fs->id;
+ cp.rx_fs.type = rx_fs->type;
+ cp.rx_fs.max_sdu = cpu_to_le16(rx_fs->max_sdu);
+ cp.rx_fs.sdu_arr_time = cpu_to_le32(rx_fs->sdu_arr_time);
+ cp.rx_fs.acc_latency = cpu_to_le32(rx_fs->acc_latency);
+ cp.rx_fs.flush_to = cpu_to_le32(rx_fs->flush_to);
+ hci_conn_hold(chan->conn);
+ hci_send_cmd(chan->conn->hdev, HCI_OP_FLOW_SPEC_MODIFY, sizeof(cp),
+ &cp);
+}
+EXPORT_SYMBOL(hci_chan_modify);
+
/* Drop all connection on the device */
-void hci_conn_hash_flush(struct hci_dev *hdev)
+void hci_conn_hash_flush(struct hci_dev *hdev, u8 is_process)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c, *n;
+ struct list_head *p;
BT_DBG("hdev %s", hdev->name);
- list_for_each_entry_safe(c, n, &h->list, list) {
+ p = h->list.next;
+ while (p != &h->list) {
+ struct hci_conn *c;
+
+ c = list_entry(p, struct hci_conn, list);
+ p = p->next;
+
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
+ hci_proto_disconn_cfm(c, 0x16, is_process);
hci_conn_del(c);
}
}
@@ -847,10 +1157,10 @@
int hci_get_conn_list(void __user *arg)
{
- register struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
+ struct list_head *p;
int n = 0, size, err;
if (copy_from_user(&req, arg, sizeof(req)))
@@ -873,8 +1183,11 @@
ci = cl->conn_info;
- hci_dev_lock(hdev);
- list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ hci_dev_lock_bh(hdev);
+ list_for_each(p, &hdev->conn_hash.list) {
+ register struct hci_conn *c;
+ c = list_entry(p, struct hci_conn, list);
+
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
@@ -893,7 +1206,7 @@
if (++n >= req.conn_num)
break;
}
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
cl->dev_id = hdev->id;
cl->conn_num = n;
@@ -917,7 +1230,7 @@
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
if (conn) {
bacpy(&ci.bdaddr, &conn->dst);
@@ -935,8 +1248,10 @@
ci.cnt = hdev->acl_cnt;
ci.pkts = hdev->acl_pkts;
}
+ ci.pending_sec_level = conn->pending_sec_level;
+ ci.ssp_mode = conn->ssp_mode;
}
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
if (!conn)
return -ENOENT;
@@ -952,11 +1267,11 @@
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
if (conn)
req.type = conn->auth_type;
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
if (!conn)
return -ENOENT;
@@ -964,48 +1279,22 @@
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
-struct hci_chan *hci_chan_create(struct hci_conn *conn)
+int hci_set_auth_info(struct hci_dev *hdev, void __user *arg)
{
- struct hci_dev *hdev = conn->hdev;
- struct hci_chan *chan;
+ struct hci_auth_info_req req;
+ struct hci_conn *conn;
- BT_DBG("%s conn %p", hdev->name, conn);
+ if (copy_from_user(&req, arg, sizeof(req)))
+ return -EFAULT;
- chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
- if (!chan)
- return NULL;
+ hci_dev_lock_bh(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
+ if (conn)
+ conn->auth_type = req.type;
+ hci_dev_unlock_bh(hdev);
- chan->conn = conn;
- skb_queue_head_init(&chan->data_q);
+ if (!conn)
+ return -ENOENT;
- list_add_rcu(&chan->list, &conn->chan_list);
-
- return chan;
-}
-
-int hci_chan_del(struct hci_chan *chan)
-{
- struct hci_conn *conn = chan->conn;
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
-
- list_del_rcu(&chan->list);
-
- synchronize_rcu();
-
- skb_queue_purge(&chan->data_q);
- kfree(chan);
-
- return 0;
-}
-
-void hci_chan_list_flush(struct hci_conn *conn)
-{
- struct hci_chan *chan, *n;
-
- BT_DBG("conn %p", conn);
-
- list_for_each_entry_safe(chan, n, &conn->chan_list, list)
- hci_chan_del(chan);
+ return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d6dc44c..da8b2dc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,7 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -40,11 +39,13 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
@@ -53,9 +54,13 @@
#define AUTO_OFF_TIMEOUT 2000
-static void hci_rx_work(struct work_struct *work);
-static void hci_cmd_work(struct work_struct *work);
-static void hci_tx_work(struct work_struct *work);
+static void hci_cmd_task(unsigned long arg);
+static void hci_rx_task(unsigned long arg);
+static void hci_tx_task(unsigned long arg);
+
+static DEFINE_RWLOCK(hci_task_lock);
+
+static bool enable_smp = 1;
/* HCI device list */
LIST_HEAD(hci_dev_list);
@@ -65,11 +70,32 @@
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
+/* AMP Manager event callbacks */
+LIST_HEAD(amp_mgr_cb_list);
+DEFINE_RWLOCK(amp_mgr_cb_list_lock);
+
+/* HCI protocols */
+#define HCI_MAX_PROTO 2
+struct hci_proto *hci_proto[HCI_MAX_PROTO];
+
+/* HCI notifiers list */
+static ATOMIC_NOTIFIER_HEAD(hci_notifier);
+
/* ---- HCI notifications ---- */
+int hci_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&hci_notifier, nb);
+}
+
+int hci_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&hci_notifier, nb);
+}
+
static void hci_notify(struct hci_dev *hdev, int event)
{
- hci_sock_dev_event(hdev, event);
+ atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */
@@ -81,28 +107,8 @@
/* If this is the init phase check if the completed command matches
* the last init command, and if not just return.
*/
- if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
- struct sk_buff *skb;
-
- /* Some CSR based controllers generate a spontaneous
- * reset complete event during init and any pending
- * command will never be completed. In such a case we
- * need to resend whatever was the last sent
- * command.
- */
-
- if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
- return;
-
- skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
- if (skb) {
- skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
-
+ if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
return;
- }
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -146,7 +152,7 @@
switch (hdev->req_status) {
case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
+ err = -bt_err(hdev->req_result);
break;
case HCI_REQ_CANCELED:
@@ -187,75 +193,16 @@
/* Reset device */
set_bit(HCI_RESET, &hdev->flags);
+ memset(&hdev->features, 0, sizeof(hdev->features));
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
-static void bredr_init(struct hci_dev *hdev)
-{
- struct hci_cp_delete_stored_link_key cp;
- __le16 param;
- __u8 flt_type;
-
- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
-
- /* Mandatory initialization */
-
- /* Reset */
- if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
- set_bit(HCI_RESET, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
- }
-
- /* Read Local Supported Features */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
- /* Read Local Version */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read Buffer Size (ACL mtu, max pkt, etc.) */
- hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-
- /* Read BD Address */
- hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
-
- /* Read Class of Device */
- hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-
- /* Read Local Name */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
- /* Read Voice Setting */
- hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
-
- /* Optional initialization */
-
- /* Clear Event Filters */
- flt_type = HCI_FLT_CLEAR_ALL;
- hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-
- /* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
- hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 1;
- hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
-}
-
-static void amp_init(struct hci_dev *hdev)
-{
- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
-
- /* Reset */
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-
- /* Read Local Version */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-}
-
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
+ struct hci_cp_delete_stored_link_key cp;
struct sk_buff *skb;
+ __le16 param;
+ __u8 flt_type;
BT_DBG("%s %ld", hdev->name, opt);
@@ -267,24 +214,89 @@
skb->dev = (void *) hdev;
skb_queue_tail(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ tasklet_schedule(&hdev->cmd_task);
}
skb_queue_purge(&hdev->driver_init);
- switch (hdev->dev_type) {
- case HCI_BREDR:
- bredr_init(hdev);
- break;
+ /* Mandatory initialization */
- case HCI_AMP:
- amp_init(hdev);
- break;
-
- default:
- BT_ERR("Unknown device type %d", hdev->dev_type);
- break;
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
+ set_bit(HCI_RESET, &hdev->flags);
+ hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
+ /* Read Local Version */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+
+ /* Set default HCI Flow Control Mode */
+ if (hdev->dev_type == HCI_BREDR)
+ hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
+ else
+ hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
+
+ /* Read HCI Flow Control Mode */
+ hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
+ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+ hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+
+ /* Read Data Block Size (ACL mtu, max pkt, etc.) */
+ hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
+#if 0
+ /* Host buffer size */
+ {
+ struct hci_cp_host_buffer_size cp;
+ cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
+ cp.sco_mtu = HCI_MAX_SCO_SIZE;
+ cp.acl_max_pkt = cpu_to_le16(0xffff);
+ cp.sco_max_pkt = cpu_to_le16(0xffff);
+ hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
+ }
+#endif
+
+ if (hdev->dev_type == HCI_BREDR) {
+ /* BR-EDR initialization */
+
+ /* Read Local Supported Features */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
+ /* Read BD Address */
+ hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
+
+ /* Read Class of Device */
+ hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+
+ /* Read Local Name */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+
+ /* Read Voice Setting */
+ hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+
+ /* Optional initialization */
+ /* Clear Event Filters */
+ flt_type = HCI_FLT_CLEAR_ALL;
+ hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+
+ /* Connection accept timeout ~20 secs */
+ param = cpu_to_le16(0x7d00);
+ hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 1;
+ hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp);
+ } else {
+ /* AMP initialization */
+ /* Connection accept timeout ~5 secs */
+ param = cpu_to_le16(0x1f40);
+ hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ /* Read AMP Info */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ }
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -339,7 +351,8 @@
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
- struct hci_dev *hdev = NULL, *d;
+ struct hci_dev *hdev = NULL;
+ struct list_head *p;
BT_DBG("%d", index);
@@ -347,7 +360,8 @@
return NULL;
read_lock(&hci_dev_list_lock);
- list_for_each_entry(d, &hci_dev_list, list) {
+ list_for_each(p, &hci_dev_list) {
+ struct hci_dev *d = list_entry(p, struct hci_dev, list);
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
@@ -356,211 +370,75 @@
read_unlock(&hci_dev_list_lock);
return hdev;
}
+EXPORT_SYMBOL(hci_dev_get);
/* ---- Inquiry support ---- */
-
-bool hci_discovery_active(struct hci_dev *hdev)
-{
- struct discovery_state *discov = &hdev->discovery;
-
- switch (discov->state) {
- case DISCOVERY_FINDING:
- case DISCOVERY_RESOLVING:
- return true;
-
- default:
- return false;
- }
-}
-
-void hci_discovery_set_state(struct hci_dev *hdev, int state)
-{
- BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
-
- if (hdev->discovery.state == state)
- return;
-
- switch (state) {
- case DISCOVERY_STOPPED:
- if (hdev->discovery.state != DISCOVERY_STARTING)
- mgmt_discovering(hdev, 0);
- hdev->discovery.type = 0;
- break;
- case DISCOVERY_STARTING:
- break;
- case DISCOVERY_FINDING:
- mgmt_discovering(hdev, 1);
- break;
- case DISCOVERY_RESOLVING:
- break;
- case DISCOVERY_STOPPING:
- break;
- }
-
- hdev->discovery.state = state;
-}
-
static void inquiry_cache_flush(struct hci_dev *hdev)
{
- struct discovery_state *cache = &hdev->discovery;
- struct inquiry_entry *p, *n;
+ struct inquiry_cache *cache = &hdev->inq_cache;
+ struct inquiry_entry *next = cache->list, *e;
- list_for_each_entry_safe(p, n, &cache->all, all) {
- list_del(&p->all);
- kfree(p);
+ BT_DBG("cache %p", cache);
+
+ cache->list = NULL;
+ while ((e = next)) {
+ next = e->next;
+ kfree(e);
}
-
- INIT_LIST_HEAD(&cache->unknown);
- INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
BT_DBG("cache %p, %s", cache, batostr(bdaddr));
- list_for_each_entry(e, &cache->all, all) {
+ for (e = cache->list; e; e = e->next)
if (!bacmp(&e->data.bdaddr, bdaddr))
- return e;
- }
-
- return NULL;
-}
-
-struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
-{
- struct discovery_state *cache = &hdev->discovery;
- struct inquiry_entry *e;
-
- BT_DBG("cache %p, %s", cache, batostr(bdaddr));
-
- list_for_each_entry(e, &cache->unknown, list) {
- if (!bacmp(&e->data.bdaddr, bdaddr))
- return e;
- }
-
- return NULL;
-}
-
-struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
- bdaddr_t *bdaddr,
- int state)
-{
- struct discovery_state *cache = &hdev->discovery;
- struct inquiry_entry *e;
-
- BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
-
- list_for_each_entry(e, &cache->resolve, list) {
- if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
- return e;
- if (!bacmp(&e->data.bdaddr, bdaddr))
- return e;
- }
-
- return NULL;
-}
-
-void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
- struct inquiry_entry *ie)
-{
- struct discovery_state *cache = &hdev->discovery;
- struct list_head *pos = &cache->resolve;
- struct inquiry_entry *p;
-
- list_del(&ie->list);
-
- list_for_each_entry(p, &cache->resolve, list) {
- if (p->name_state != NAME_PENDING &&
- abs(p->data.rssi) >= abs(ie->data.rssi))
break;
- pos = &p->list;
- }
-
- list_add(&ie->list, pos);
+ return e;
}
-bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
- bool name_known, bool *ssp)
+void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
- struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *ie;
BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
- if (ssp)
- *ssp = data->ssp_mode;
-
ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
- if (ie) {
- if (ie->data.ssp_mode && ssp)
- *ssp = true;
+ if (!ie) {
+ /* Entry not in the cache. Add new one. */
+ ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+ if (!ie)
+ return;
- if (ie->name_state == NAME_NEEDED &&
- data->rssi != ie->data.rssi) {
- ie->data.rssi = data->rssi;
- hci_inquiry_cache_update_resolve(hdev, ie);
- }
-
- goto update;
- }
-
- /* Entry not in the cache. Add new one. */
- ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
- if (!ie)
- return false;
-
- list_add(&ie->all, &cache->all);
-
- if (name_known) {
- ie->name_state = NAME_KNOWN;
- } else {
- ie->name_state = NAME_NOT_KNOWN;
- list_add(&ie->list, &cache->unknown);
- }
-
-update:
- if (name_known && ie->name_state != NAME_KNOWN &&
- ie->name_state != NAME_PENDING) {
- ie->name_state = NAME_KNOWN;
- list_del(&ie->list);
+ ie->next = cache->list;
+ cache->list = ie;
}
memcpy(&ie->data, data, sizeof(*data));
ie->timestamp = jiffies;
cache->timestamp = jiffies;
-
- if (ie->name_state == NAME_NOT_KNOWN)
- return false;
-
- return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
- struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_info *info = (struct inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
- list_for_each_entry(e, &cache->all, all) {
+ for (e = cache->list; e && copied < num; e = e->next, copied++) {
struct inquiry_data *data = &e->data;
-
- if (copied >= num)
- break;
-
bacpy(&info->bdaddr, &data->bdaddr);
info->pscan_rep_mode = data->pscan_rep_mode;
info->pscan_period_mode = data->pscan_period_mode;
info->pscan_mode = data->pscan_mode;
memcpy(info->dev_class, data->dev_class, 3);
info->clock_offset = data->clock_offset;
-
info++;
- copied++;
}
BT_DBG("cache %p, copied %d", cache, copied);
@@ -600,14 +478,14 @@
if (!hdev)
return -ENODEV;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
@@ -629,9 +507,9 @@
goto done;
}
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
@@ -665,11 +543,6 @@
hci_req_lock(hdev);
- if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
- ret = -ENODEV;
- goto done;
- }
-
if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
ret = -ERFKILL;
goto done;
@@ -683,16 +556,24 @@
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
- /* Treat all non BR/EDR controllers as raw devices if
- enable_hs is not set */
- if (hdev->dev_type != HCI_BREDR && !enable_hs)
- set_bit(HCI_RAW, &hdev->flags);
-
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
+ if (!skb_queue_empty(&hdev->cmd_q)) {
+ BT_ERR("command queue is not empty, purging");
+ skb_queue_purge(&hdev->cmd_q);
+ }
+ if (!skb_queue_empty(&hdev->rx_q)) {
+ BT_ERR("rx queue is not empty, purging");
+ skb_queue_purge(&hdev->rx_q);
+ }
+ if (!skb_queue_empty(&hdev->raw_q)) {
+ BT_ERR("raw queue is not empty, purging");
+ skb_queue_purge(&hdev->raw_q);
+ }
+
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
@@ -701,7 +582,7 @@
ret = __hci_request(hdev, hci_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
- if (lmp_host_le_capable(hdev))
+ if (lmp_le_capable(hdev))
ret = __hci_request(hdev, hci_le_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
@@ -712,16 +593,17 @@
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 1);
- hci_dev_unlock(hdev);
+ if (!test_bit(HCI_SETUP, &hdev->flags) &&
+ hdev->dev_type == HCI_BREDR) {
+ hci_dev_lock_bh(hdev);
+ mgmt_powered(hdev->id, 1);
+ hci_dev_unlock_bh(hdev);
}
} else {
/* Init failed, cleanup */
- flush_work(&hdev->tx_work);
- flush_work(&hdev->cmd_work);
- flush_work(&hdev->rx_work);
+ tasklet_kill(&hdev->rx_task);
+ tasklet_kill(&hdev->tx_task);
+ tasklet_kill(&hdev->cmd_task);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
@@ -744,11 +626,11 @@
return ret;
}
-static int hci_dev_do_close(struct hci_dev *hdev)
+static int hci_dev_do_close(struct hci_dev *hdev, u8 is_process)
{
- BT_DBG("%s %p", hdev->name, hdev);
+ unsigned long keepflags = 0;
- cancel_work_sync(&hdev->le_scan);
+ BT_DBG("%s %p", hdev->name, hdev);
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
@@ -759,44 +641,38 @@
return 0;
}
- /* Flush RX and TX works */
- flush_work(&hdev->tx_work);
- flush_work(&hdev->rx_work);
+ /* Kill RX and TX tasks */
+ tasklet_kill(&hdev->rx_task);
+ tasklet_kill(&hdev->tx_task);
- if (hdev->discov_timeout > 0) {
- cancel_delayed_work(&hdev->discov_off);
- hdev->discov_timeout = 0;
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- }
-
- if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
- cancel_delayed_work(&hdev->service_cache);
-
- cancel_delayed_work_sync(&hdev->le_scan_disable);
-
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
- hci_conn_hash_flush(hdev);
- hci_dev_unlock(hdev);
+ hci_conn_hash_flush(hdev, is_process);
+ hci_dev_unlock_bh(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
+ if (hdev->dev_type == HCI_BREDR) {
+ hci_dev_lock_bh(hdev);
+ mgmt_powered(hdev->id, 0);
+ hci_dev_unlock_bh(hdev);
+ }
+
if (hdev->flush)
hdev->flush(hdev);
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
- if (!test_bit(HCI_RAW, &hdev->flags) &&
- test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
msecs_to_jiffies(250));
clear_bit(HCI_INIT, &hdev->flags);
}
- /* flush cmd work */
- flush_work(&hdev->cmd_work);
+ /* Kill cmd task */
+ tasklet_kill(&hdev->cmd_task);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -814,17 +690,15 @@
* and no tasks are scheduled. */
hdev->close(hdev);
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 0);
- hci_dev_unlock(hdev);
- }
+ /* Clear only non-persistent flags */
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_MGMT, &keepflags);
+ if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+ set_bit(HCI_LINK_KEYS, &keepflags);
+ if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
+ set_bit(HCI_DEBUG_KEYS, &keepflags);
- /* Clear flags */
- hdev->flags = 0;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
- memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ hdev->flags = keepflags;
hci_req_unlock(hdev);
@@ -840,12 +714,7 @@
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
-
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
- cancel_delayed_work(&hdev->power_off);
-
- err = hci_dev_do_close(hdev);
-
+ err = hci_dev_do_close(hdev, 1);
hci_dev_put(hdev);
return err;
}
@@ -860,6 +729,7 @@
return -ENODEV;
hci_req_lock(hdev);
+ tasklet_disable(&hdev->tx_task);
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
@@ -868,10 +738,10 @@
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
- hci_conn_hash_flush(hdev);
- hci_dev_unlock(hdev);
+ hci_conn_hash_flush(hdev, 0);
+ hci_dev_unlock_bh(hdev);
if (hdev->flush)
hdev->flush(hdev);
@@ -884,6 +754,7 @@
msecs_to_jiffies(HCI_INIT_TIMEOUT));
done:
+ tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
@@ -982,9 +853,9 @@
int hci_get_dev_list(void __user *arg)
{
- struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
+ struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
@@ -1002,13 +873,16 @@
dr = dl->dev_req;
- read_lock(&hci_dev_list_lock);
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
- cancel_delayed_work(&hdev->power_off);
+ read_lock_bh(&hci_dev_list_lock);
+ list_for_each(p, &hci_dev_list) {
+ struct hci_dev *hdev;
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
- set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ hdev = list_entry(p, struct hci_dev, list);
+
+ hci_del_off_timer(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_PAIRABLE, &hdev->flags);
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
@@ -1016,7 +890,7 @@
if (++n >= dev_num)
break;
}
- read_unlock(&hci_dev_list_lock);
+ read_unlock_bh(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
@@ -1040,11 +914,10 @@
if (!hdev)
return -ENODEV;
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
- cancel_delayed_work_sync(&hdev->power_off);
+ hci_del_off_timer(hdev);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
- set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_PAIRABLE, &hdev->flags);
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
@@ -1080,7 +953,7 @@
if (!blocked)
return 0;
- hci_dev_do_close(hdev);
+ hci_dev_do_close(hdev, 0);
return 0;
}
@@ -1098,7 +971,6 @@
if (!hdev)
return NULL;
- hci_init_sysfs(hdev);
skb_queue_head_init(&hdev->driver_init);
return hdev;
@@ -1118,46 +990,50 @@
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+ int err;
BT_DBG("%s", hdev->name);
- if (hci_dev_open(hdev->id) < 0)
+ err = hci_dev_open(hdev->id);
+ if (err && err != -EALREADY)
return;
- if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
- schedule_delayed_work(&hdev->power_off,
- msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
+ hdev->dev_type == HCI_BREDR)
+ mod_timer(&hdev->off_timer,
+ jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
- if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
- mgmt_index_added(hdev);
+ if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
+ hdev->dev_type == HCI_BREDR)
+ mgmt_index_added(hdev->id);
}
static void hci_power_off(struct work_struct *work)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- power_off.work);
+ struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
BT_DBG("%s", hdev->name);
- hci_dev_do_close(hdev);
+ hci_dev_close(hdev->id);
}
-static void hci_discov_off(struct work_struct *work)
+static void hci_auto_off(unsigned long data)
{
- struct hci_dev *hdev;
- u8 scan = SCAN_PAGE;
-
- hdev = container_of(work, struct hci_dev, discov_off.work);
+ struct hci_dev *hdev = (struct hci_dev *) data;
BT_DBG("%s", hdev->name);
- hci_dev_lock(hdev);
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+ queue_work(hdev->workqueue, &hdev->power_off);
+}
- hdev->discov_timeout = 0;
+void hci_del_off_timer(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
- hci_dev_unlock(hdev);
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
@@ -1192,108 +1068,80 @@
return 0;
}
-int hci_smp_ltks_clear(struct hci_dev *hdev)
-{
- struct smp_ltk *k, *tmp;
-
- list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
- list_del(&k->list);
- kfree(k);
- }
-
- return 0;
-}
-
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct link_key *k;
+ struct list_head *p;
- list_for_each_entry(k, &hdev->link_keys, list)
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
+
+ k = list_entry(p, struct link_key, list);
+
if (bacmp(bdaddr, &k->bdaddr) == 0)
return k;
+ }
return NULL;
}
-static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
- u8 key_type, u8 old_key_type)
+struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
- /* Legacy key */
- if (key_type < 0x03)
- return true;
+ struct list_head *p;
- /* Debug keys are insecure so don't store them persistently */
- if (key_type == HCI_LK_DEBUG_COMBINATION)
- return false;
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
+ struct key_master_id *id;
- /* Changed combination key and there's no previous one */
- if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
- return false;
+ k = list_entry(p, struct link_key, list);
- /* Security mode 3 case */
- if (!conn)
- return true;
-
- /* Neither local nor remote side had no-bonding as requirement */
- if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
- return true;
-
- /* Local side had dedicated bonding as requirement */
- if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
- return true;
-
- /* Remote side had dedicated bonding as requirement */
- if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
- return true;
-
- /* If none of the above criteria match, then don't store the key
- * persistently */
- return false;
-}
-
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
-{
- struct smp_ltk *k;
-
- list_for_each_entry(k, &hdev->long_term_keys, list) {
- if (k->ediv != ediv ||
- memcmp(rand, k->rand, sizeof(k->rand)))
+ if (k->key_type != KEY_TYPE_LTK)
continue;
- return k;
+ if (k->dlen != sizeof(*id))
+ continue;
+
+ id = (void *) &k->data;
+ if (id->ediv == ediv &&
+ (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
+ return k;
}
return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
-struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 addr_type)
+struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type)
{
- struct smp_ltk *k;
+ struct list_head *p;
- list_for_each_entry(k, &hdev->long_term_keys, list)
- if (addr_type == k->bdaddr_type &&
- bacmp(bdaddr, &k->bdaddr) == 0)
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
+
+ k = list_entry(p, struct link_key, list);
+
+ if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
return k;
+ }
return NULL;
}
-EXPORT_SYMBOL(hci_find_ltk_by_addr);
+EXPORT_SYMBOL(hci_find_link_key_type);
-int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
- bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+ u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
+ struct hci_conn *conn;
u8 old_key_type;
- bool persistent;
+ u8 bonded = 0;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
- old_key_type = old_key->type;
+ old_key_type = old_key->key_type;
key = old_key;
} else {
- old_key_type = conn ? conn->key_type : 0xff;
+ old_key_type = 0xff;
key = kzalloc(sizeof(*key), GFP_ATOMIC);
if (!key)
return -ENOMEM;
@@ -1302,72 +1150,76 @@
BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
- /* Some buggy controller combinations generate a changed
- * combination key for legacy pairing even when there's no
- * previous key */
- if (type == HCI_LK_CHANGED_COMBINATION &&
- (!conn || conn->remote_auth == 0xff) &&
- old_key_type == 0xff) {
- type = HCI_LK_COMBINATION;
- if (conn)
- conn->key_type = type;
- }
-
bacpy(&key->bdaddr, bdaddr);
memcpy(key->val, val, 16);
+ key->auth = 0x01;
+ key->key_type = type;
key->pin_len = pin_len;
- if (type == HCI_LK_CHANGED_COMBINATION)
- key->type = old_key_type;
- else
- key->type = type;
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+ /* Store the link key persistently if one of the following is true:
+ * 1. the remote side is using dedicated bonding since in that case
+ * also the local requirements are set to dedicated bonding
+ * 2. the local side had dedicated bonding as a requirement
+ * 3. this is a legacy link key
+ * 4. this is a changed combination key and there was a previously
+ * stored one
+ * If none of the above match only keep the link key around for
+ * this connection and set the temporary flag for the device.
+ */
- if (!new_key)
- return 0;
+ if (conn) {
+ if ((conn->remote_auth > 0x01) ||
+ (conn->auth_initiator && conn->auth_type > 0x01) ||
+ (key->key_type < 0x03) ||
+ (key->key_type == 0x06 && old_key_type != 0xff))
+ bonded = 1;
+ }
- persistent = hci_persistent_key(hdev, conn, type, old_key_type);
+ if (new_key)
+ mgmt_new_key(hdev->id, key, bonded);
- mgmt_new_link_key(hdev, key, persistent);
-
- if (conn)
- conn->flush_key = !persistent;
+ if (type == 0x06)
+ key->key_type = old_key_type;
return 0;
}
-int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
- int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
- ediv, u8 rand[8])
+int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+ u8 addr_type, u8 key_size, u8 auth,
+ __le16 ediv, u8 rand[8], u8 ltk[16])
{
- struct smp_ltk *key, *old_key;
+ struct link_key *key, *old_key;
+ struct key_master_id *id;
- if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
- return 0;
+ BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
+ batostr(bdaddr), addr_type);
- old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
- if (old_key)
+ old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
+ if (old_key) {
key = old_key;
- else {
- key = kzalloc(sizeof(*key), GFP_ATOMIC);
+ } else {
+ key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
if (!key)
return -ENOMEM;
- list_add(&key->list, &hdev->long_term_keys);
+ list_add(&key->list, &hdev->link_keys);
}
+ key->dlen = sizeof(*id);
+
bacpy(&key->bdaddr, bdaddr);
- key->bdaddr_type = addr_type;
- memcpy(key->val, tk, sizeof(key->val));
- key->authenticated = authenticated;
- key->ediv = ediv;
- key->enc_size = enc_size;
- key->type = type;
- memcpy(key->rand, rand, sizeof(key->rand));
+ key->addr_type = addr_type;
+ memcpy(key->val, ltk, sizeof(key->val));
+ key->key_type = KEY_TYPE_LTK;
+ key->pin_len = key_size;
+ key->auth = auth;
- if (!new_key)
- return 0;
+ id = (void *) &key->data;
+ id->ediv = ediv;
+ memcpy(id->rand, rand, sizeof(id->rand));
- if (type & HCI_SMP_LTK)
- mgmt_new_ltk(hdev, key, 1);
+ if (new_key)
+ mgmt_new_key(hdev->id, key, auth & 0x01);
return 0;
}
@@ -1388,23 +1240,6 @@
return 0;
}
-int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct smp_ltk *k, *tmp;
-
- list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
- if (bacmp(bdaddr, &k->bdaddr))
- continue;
-
- BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
-
- list_del(&k->list);
- kfree(k);
- }
-
- return 0;
-}
-
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
@@ -1412,11 +1247,12 @@
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ clear_bit(HCI_RESET, &hdev->flags);
+ tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
+ bdaddr_t *bdaddr)
{
struct oob_data *data;
@@ -1455,8 +1291,67 @@
return 0;
}
+static void hci_adv_clear(unsigned long arg)
+{
+ struct hci_dev *hdev = (void *) arg;
+
+ hci_adv_entries_clear(hdev);
+}
+
+int hci_adv_entries_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ BT_DBG("");
+ write_lock_bh(&hdev->adv_entries_lock);
+
+ list_for_each_safe(p, n, &hdev->adv_entries) {
+ struct adv_entry *entry;
+
+ entry = list_entry(p, struct adv_entry, list);
+
+ list_del(p);
+ kfree(entry);
+ }
+
+ write_unlock_bh(&hdev->adv_entries_lock);
+
+ return 0;
+}
+
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct list_head *p;
+ struct adv_entry *res = NULL;
+
+ BT_DBG("");
+ read_lock_bh(&hdev->adv_entries_lock);
+
+ list_for_each(p, &hdev->adv_entries) {
+ struct adv_entry *entry;
+
+ entry = list_entry(p, struct adv_entry, list);
+
+ if (bacmp(bdaddr, &entry->bdaddr) == 0) {
+ res = entry;
+ goto out;
+ }
+ }
+out:
+ read_unlock_bh(&hdev->adv_entries_lock);
+ return res;
+}
+
+static inline int is_connectable_adv(u8 evt_type)
+{
+ if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
+ return 1;
+
+ return 0;
+}
+
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
- u8 *randomizer)
+ u8 *randomizer)
{
struct oob_data *data;
@@ -1479,258 +1374,75 @@
return 0;
}
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct bdaddr_list *b;
-
- list_for_each_entry(b, &hdev->blacklist, list)
- if (bacmp(bdaddr, &b->bdaddr) == 0)
- return b;
-
- return NULL;
-}
-
-int hci_blacklist_clear(struct hci_dev *hdev)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
-
- list_del(p);
- kfree(b);
- }
-
- return 0;
-}
-
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
- struct bdaddr_list *entry;
-
- if (bacmp(bdaddr, BDADDR_ANY) == 0)
- return -EBADF;
-
- if (hci_blacklist_lookup(hdev, bdaddr))
- return -EEXIST;
-
- entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- bacpy(&entry->bdaddr, bdaddr);
-
- list_add(&entry->list, &hdev->blacklist);
-
- return mgmt_device_blocked(hdev, bdaddr, type);
-}
-
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
- struct bdaddr_list *entry;
-
- if (bacmp(bdaddr, BDADDR_ANY) == 0)
- return hci_blacklist_clear(hdev);
-
- entry = hci_blacklist_lookup(hdev, bdaddr);
- if (!entry)
- return -ENOENT;
-
- list_del(&entry->list);
- kfree(entry);
-
- return mgmt_device_unblocked(hdev, bdaddr, type);
-}
-
-static void hci_clear_adv_cache(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_work.work);
-
- hci_dev_lock(hdev);
-
- hci_adv_entries_clear(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct adv_entry *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- BT_DBG("%s adv cache cleared", hdev->name);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_add_adv_entry(struct hci_dev *hdev,
+ struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;
+ u8 flags = 0;
+ int i;
- list_for_each_entry(entry, &hdev->adv_entries, list)
- if (bacmp(bdaddr, &entry->bdaddr) == 0)
- return entry;
+ BT_DBG("");
- return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
+ if (!is_connectable_adv(ev->evt_type))
return -EINVAL;
+ if (ev->data && ev->length) {
+ for (i = 0; (i + 2) < ev->length; i++)
+ if (ev->data[i+1] == 0x01) {
+ flags = ev->data[i+2];
+ BT_DBG("flags: %2.2x", flags);
+ break;
+ } else {
+ i += ev->data[i];
+ }
+ }
+
+ entry = hci_find_adv_entry(hdev, &ev->bdaddr);
/* Only new entries should be added to adv_entries. So, if
* bdaddr was found, don't add it. */
- if (hci_find_adv_entry(hdev, &ev->bdaddr))
+ if (entry) {
+ entry->flags = flags;
return 0;
+ }
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
bacpy(&entry->bdaddr, &ev->bdaddr);
entry->bdaddr_type = ev->bdaddr_type;
+ entry->flags = flags;
+ write_lock(&hdev->adv_entries_lock);
list_add(&entry->list, &hdev->adv_entries);
-
- BT_DBG("%s adv entry added: address %s type %u", hdev->name,
- batostr(&entry->bdaddr), entry->bdaddr_type);
+ write_unlock(&hdev->adv_entries_lock);
return 0;
}
-static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
+static struct crypto_blkcipher *alloc_cypher(void)
{
- struct le_scan_params *param = (struct le_scan_params *) opt;
- struct hci_cp_le_set_scan_param cp;
+ if (enable_smp)
+ return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
- memset(&cp, 0, sizeof(cp));
- cp.type = param->type;
- cp.interval = cpu_to_le16(param->interval);
- cp.window = cpu_to_le16(param->window);
-
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
-}
-
-static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
-{
- struct hci_cp_le_set_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = 1;
-
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
- u16 window, int timeout)
-{
- long timeo = msecs_to_jiffies(3000);
- struct le_scan_params param;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
- return -EINPROGRESS;
-
- param.type = type;
- param.interval = interval;
- param.window = window;
-
- hci_req_lock(hdev);
-
- err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m,
- timeo);
- if (!err)
- err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
-
- hci_req_unlock(hdev);
-
- if (err < 0)
- return err;
-
- schedule_delayed_work(&hdev->le_scan_disable,
- msecs_to_jiffies(timeout));
-
- return 0;
-}
-
-static void le_scan_disable_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- le_scan_disable.work);
- struct hci_cp_le_set_scan_enable cp;
-
- BT_DBG("%s", hdev->name);
-
- memset(&cp, 0, sizeof(cp));
-
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static void le_scan_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
- struct le_scan_params *param = &hdev->le_scan_params;
-
- BT_DBG("%s", hdev->name);
-
- hci_do_le_scan(hdev, param->type, param->interval, param->window,
- param->timeout);
-}
-
-int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
- int timeout)
-{
- struct le_scan_params *param = &hdev->le_scan_params;
-
- BT_DBG("%s", hdev->name);
-
- if (work_busy(&hdev->le_scan))
- return -EINPROGRESS;
-
- param->type = type;
- param->interval = interval;
- param->window = window;
- param->timeout = timeout;
-
- queue_work(system_long_wq, &hdev->le_scan);
-
- return 0;
+ return ERR_PTR(-ENOTSUPP);
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head = &hci_dev_list, *p;
- int i, id, error;
+ int i, id;
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
+ hdev->bus, hdev->owner);
- if (!hdev->open || !hdev->close)
+ if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
- write_lock(&hci_dev_list_lock);
+ write_lock_bh(&hci_dev_list_lock);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
@@ -1741,12 +1453,12 @@
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
- list_add_tail(&hdev->list, head);
+ list_add(&hdev->list, head);
- mutex_init(&hdev->lock);
+ atomic_set(&hdev->refcnt, 1);
+ spin_lock_init(&hdev->lock);
hdev->flags = 0;
- hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1756,16 +1468,19 @@
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- INIT_WORK(&hdev->rx_work, hci_rx_work);
- INIT_WORK(&hdev->cmd_work, hci_cmd_work);
- INIT_WORK(&hdev->tx_work, hci_tx_work);
-
+ tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
+ tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
+ tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
+ (unsigned long) hdev);
+ setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
+ (unsigned long) hdev);
for (i = 0; i < NUM_REASSEMBLY; i++)
hdev->reassembly[i] = NULL;
@@ -1773,49 +1488,43 @@
init_waitqueue_head(&hdev->req_wait_q);
mutex_init(&hdev->req_lock);
- discovery_init(hdev);
+ inquiry_cache_init(hdev);
hci_conn_hash_init(hdev);
-
- INIT_LIST_HEAD(&hdev->mgmt_pending);
+ hci_chan_list_init(hdev);
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
- INIT_LIST_HEAD(&hdev->long_term_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->adv_entries);
+ rwlock_init(&hdev->adv_entries_lock);
+ setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
- INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
-
- INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_WORK(&hdev->power_off, hci_power_off);
+ setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
atomic_set(&hdev->promisc, 0);
- INIT_WORK(&hdev->le_scan, le_scan_work);
+ write_unlock_bh(&hci_dev_list_lock);
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ hdev->workqueue = create_singlethread_workqueue(hdev->name);
+ if (!hdev->workqueue)
+ goto nomem;
- write_unlock(&hci_dev_list_lock);
+ hdev->tfm = alloc_cypher();
+ if (IS_ERR(hdev->tfm))
+ BT_INFO("Failed to load transform for ecb(aes): %ld",
+ PTR_ERR(hdev->tfm));
- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
- if (!hdev->workqueue) {
- error = -ENOMEM;
- goto err;
- }
-
- error = hci_add_sysfs(hdev);
- if (error < 0)
- goto err_wqueue;
+ hci_register_sysfs(hdev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1826,54 +1535,49 @@
}
}
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
- set_bit(HCI_SETUP, &hdev->dev_flags);
- schedule_work(&hdev->power_on);
+ set_bit(HCI_AUTO_OFF, &hdev->flags);
+ set_bit(HCI_SETUP, &hdev->flags);
+ queue_work(hdev->workqueue, &hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
- hci_dev_hold(hdev);
return id;
-err_wqueue:
- destroy_workqueue(hdev->workqueue);
-err:
- write_lock(&hci_dev_list_lock);
+nomem:
+ write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock(&hci_dev_list_lock);
+ write_unlock_bh(&hci_dev_list_lock);
- return error;
+ return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
-void hci_unregister_dev(struct hci_dev *hdev)
+int hci_unregister_dev(struct hci_dev *hdev)
{
int i;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- set_bit(HCI_UNREGISTER, &hdev->dev_flags);
-
- write_lock(&hci_dev_list_lock);
+ write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock(&hci_dev_list_lock);
+ write_unlock_bh(&hci_dev_list_lock);
- hci_dev_do_close(hdev);
+ hci_dev_do_close(hdev, hdev->bus == HCI_SMD);
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->dev_flags)) {
- hci_dev_lock(hdev);
- mgmt_index_removed(hdev);
- hci_dev_unlock(hdev);
+ !test_bit(HCI_SETUP, &hdev->flags) &&
+ hdev->dev_type == HCI_BREDR) {
+ hci_dev_lock_bh(hdev);
+ mgmt_index_removed(hdev->id);
+ hci_dev_unlock_bh(hdev);
}
- /* mgmt_index_removed should take care of emptying the
- * pending list */
- BUG_ON(!list_empty(&hdev->mgmt_pending));
+ if (!IS_ERR(hdev->tfm))
+ crypto_free_blkcipher(hdev->tfm);
hci_notify(hdev, HCI_DEV_UNREG);
@@ -1882,22 +1586,28 @@
rfkill_destroy(hdev->rfkill);
}
- hci_del_sysfs(hdev);
+ hci_unregister_sysfs(hdev);
- cancel_delayed_work_sync(&hdev->adv_work);
+ /* Disable all timers */
+ hci_del_off_timer(hdev);
+ del_timer(&hdev->adv_timer);
+ del_timer(&hdev->cmd_timer);
+ del_timer(&hdev->disco_timer);
+ del_timer(&hdev->disco_le_timer);
destroy_workqueue(hdev->workqueue);
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
- hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ __hci_dev_put(hdev);
+
+ return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1933,8 +1643,9 @@
/* Time stamp */
__net_timestamp(skb);
+ /* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
- queue_work(hdev->workqueue, &hdev->rx_work);
+ tasklet_schedule(&hdev->rx_task);
return 0;
}
@@ -1985,7 +1696,7 @@
while (count) {
scb = (void *) skb->cb;
- len = min_t(uint, scb->expect, count);
+ len = min(scb->expect, (__u16)count);
memcpy(skb_put(skb, len), data, len);
@@ -2063,7 +1774,7 @@
data += (count - rem);
count = rem;
- }
+ };
return rem;
}
@@ -2098,7 +1809,7 @@
data += (count - rem);
count = rem;
- }
+ };
return rem;
}
@@ -2106,13 +1817,59 @@
/* ---- Interface to upper protocols ---- */
+/* Register/Unregister protocols.
+ * hci_task_lock is used to ensure that no tasks are running. */
+int hci_register_proto(struct hci_proto *hp)
+{
+ int err = 0;
+
+ BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
+
+ if (hp->id >= HCI_MAX_PROTO)
+ return -EINVAL;
+
+ write_lock_bh(&hci_task_lock);
+
+ if (!hci_proto[hp->id])
+ hci_proto[hp->id] = hp;
+ else
+ err = -EEXIST;
+
+ write_unlock_bh(&hci_task_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(hci_register_proto);
+
+int hci_unregister_proto(struct hci_proto *hp)
+{
+ int err = 0;
+
+ BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
+
+ if (hp->id >= HCI_MAX_PROTO)
+ return -EINVAL;
+
+ write_lock_bh(&hci_task_lock);
+
+ if (hci_proto[hp->id])
+ hci_proto[hp->id] = NULL;
+ else
+ err = -ENOENT;
+
+ write_unlock_bh(&hci_task_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(hci_unregister_proto);
+
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock(&hci_cb_list_lock);
+ write_lock_bh(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
- write_unlock(&hci_cb_list_lock);
+ write_unlock_bh(&hci_cb_list_lock);
return 0;
}
@@ -2122,14 +1879,82 @@
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock(&hci_cb_list_lock);
+ write_lock_bh(&hci_cb_list_lock);
list_del(&cb->list);
- write_unlock(&hci_cb_list_lock);
+ write_unlock_bh(&hci_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
+int hci_register_amp(struct amp_mgr_cb *cb)
+{
+ BT_DBG("%p", cb);
+
+ write_lock_bh(&_mgr_cb_list_lock);
+ list_add(&cb->list, &_mgr_cb_list);
+ write_unlock_bh(&_mgr_cb_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_register_amp);
+
+int hci_unregister_amp(struct amp_mgr_cb *cb)
+{
+ BT_DBG("%p", cb);
+
+ write_lock_bh(&_mgr_cb_list_lock);
+ list_del(&cb->list);
+ write_unlock_bh(&_mgr_cb_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_unregister_amp);
+
+void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
+ struct sk_buff *skb)
+{
+ struct amp_mgr_cb *cb;
+
+ BT_DBG("opcode 0x%x", opcode);
+
+ read_lock_bh(&_mgr_cb_list_lock);
+ list_for_each_entry(cb, &_mgr_cb_list, list) {
+ if (cb->amp_cmd_complete_event)
+ cb->amp_cmd_complete_event(hdev, opcode, skb);
+ }
+ read_unlock_bh(&_mgr_cb_list_lock);
+}
+
+void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
+{
+ struct amp_mgr_cb *cb;
+
+ BT_DBG("opcode 0x%x, status %d", opcode, status);
+
+ read_lock_bh(&_mgr_cb_list_lock);
+ list_for_each_entry(cb, &_mgr_cb_list, list) {
+ if (cb->amp_cmd_status_event)
+ cb->amp_cmd_status_event(hdev, opcode, status);
+ }
+ read_unlock_bh(&_mgr_cb_list_lock);
+}
+
+void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
+ struct sk_buff *skb)
+{
+ struct amp_mgr_cb *cb;
+
+ BT_DBG("ev_code 0x%x", ev_code);
+
+ read_lock_bh(&_mgr_cb_list_lock);
+ list_for_each_entry(cb, &_mgr_cb_list, list) {
+ if (cb->amp_event)
+ cb->amp_event(hdev, ev_code, skb);
+ }
+ read_unlock_bh(&_mgr_cb_list_lock);
+}
+
static int hci_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
@@ -2141,20 +1966,17 @@
BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
- /* Time stamp */
- __net_timestamp(skb);
-
- /* Send copy to monitor */
- hci_send_to_monitor(hdev, skb);
-
if (atomic_read(&hdev->promisc)) {
- /* Send copy to the sockets */
- hci_send_to_sock(hdev, skb);
+ /* Time stamp */
+ __net_timestamp(skb);
+
+ hci_send_to_sock(hdev, skb, NULL);
}
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
+ hci_notify(hdev, HCI_DEV_WRITE);
return hdev->send(skb);
}
@@ -2189,10 +2011,11 @@
hdev->init_last_cmd = opcode;
skb_queue_tail(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ tasklet_schedule(&hdev->cmd_task);
return 0;
}
+EXPORT_SYMBOL(hci_send_cmd);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
@@ -2225,18 +2048,27 @@
hdr->dlen = cpu_to_le16(len);
}
-static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
- struct sk_buff *skb, __u16 flags)
+void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
+ BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
+
+ skb->dev = (void *) hdev;
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ if (hdev->dev_type == HCI_BREDR)
+ hci_add_acl_hdr(skb, conn->handle, flags);
+ else
+ hci_add_acl_hdr(skb, chan->ll_handle, flags);
+
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
- skb_queue_tail(queue, skb);
+ skb_queue_tail(&conn->data_q, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -2244,11 +2076,10 @@
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock(&queue->lock);
+ spin_lock_bh(&conn->data_q.lock);
- __skb_queue_tail(queue, skb);
-
- flags &= ~ACL_START;
+ __skb_queue_tail(&conn->data_q, skb);
+ flags &= ~ACL_PB_MASK;
flags |= ACL_CONT;
do {
skb = list; list = list->next;
@@ -2259,27 +2090,13 @@
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
- __skb_queue_tail(queue, skb);
+ __skb_queue_tail(&conn->data_q, skb);
} while (list);
- spin_unlock(&queue->lock);
+ spin_unlock_bh(&conn->data_q.lock);
}
-}
-void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
-{
- struct hci_conn *conn = chan->conn;
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
-
- skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
-
- hci_queue_acl(conn, &chan->data_q, skb, flags);
-
- queue_work(hdev->workqueue, &hdev->tx_work);
+ tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
@@ -2302,7 +2119,7 @@
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
- queue_work(hdev->workqueue, &hdev->tx_work);
+ tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
@@ -2312,15 +2129,16 @@
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL, *c;
+ struct hci_conn *conn = NULL;
int num = 0, min = ~0;
+ struct list_head *p;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
+ list_for_each(p, &h->list) {
+ struct hci_conn *c;
+ c = list_entry(p, struct hci_conn, list);
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
@@ -2333,13 +2151,8 @@
min = c->sent;
conn = c;
}
-
- if (hci_conn_num(hdev, type) == num)
- break;
}
- rcu_read_unlock();
-
if (conn) {
int cnt, q;
@@ -2371,269 +2184,66 @@
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
+ struct list_head *p;
+ struct hci_conn *c;
BT_ERR("%s link tx timeout", hdev->name);
- rcu_read_lock();
-
/* Kill stalled connections */
- list_for_each_entry_rcu(c, &h->list, list) {
+ list_for_each(p, &h->list) {
+ c = list_entry(p, struct hci_conn, list);
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
}
-
- rcu_read_unlock();
-}
-
-static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
- int *quote)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_chan *chan = NULL;
- int num = 0, min = ~0, cur_prio = 0;
- struct hci_conn *conn;
- int cnt, q, conn_num = 0;
-
- BT_DBG("%s", hdev->name);
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(conn, &h->list, list) {
- struct hci_chan *tmp;
-
- if (conn->type != type)
- continue;
-
- if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
- continue;
-
- conn_num++;
-
- list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
- struct sk_buff *skb;
-
- if (skb_queue_empty(&tmp->data_q))
- continue;
-
- skb = skb_peek(&tmp->data_q);
- if (skb->priority < cur_prio)
- continue;
-
- if (skb->priority > cur_prio) {
- num = 0;
- min = ~0;
- cur_prio = skb->priority;
- }
-
- num++;
-
- if (conn->sent < min) {
- min = conn->sent;
- chan = tmp;
- }
- }
-
- if (hci_conn_num(hdev, type) == conn_num)
- break;
- }
-
- rcu_read_unlock();
-
- if (!chan)
- return NULL;
-
- switch (chan->conn->type) {
- case ACL_LINK:
- cnt = hdev->acl_cnt;
- break;
- case SCO_LINK:
- case ESCO_LINK:
- cnt = hdev->sco_cnt;
- break;
- case LE_LINK:
- cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
- break;
- default:
- cnt = 0;
- BT_ERR("Unknown link type");
- }
-
- q = cnt / num;
- *quote = q ? q : 1;
- BT_DBG("chan %p quote %d", chan, *quote);
- return chan;
-}
-
-static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn;
- int num = 0;
-
- BT_DBG("%s", hdev->name);
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(conn, &h->list, list) {
- struct hci_chan *chan;
-
- if (conn->type != type)
- continue;
-
- if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
- continue;
-
- num++;
-
- list_for_each_entry_rcu(chan, &conn->chan_list, list) {
- struct sk_buff *skb;
-
- if (chan->sent) {
- chan->sent = 0;
- continue;
- }
-
- if (skb_queue_empty(&chan->data_q))
- continue;
-
- skb = skb_peek(&chan->data_q);
- if (skb->priority >= HCI_PRIO_MAX - 1)
- continue;
-
- skb->priority = HCI_PRIO_MAX - 1;
-
- BT_DBG("chan %p skb %p promoted to %d", chan, skb,
- skb->priority);
- }
-
- if (hci_conn_num(hdev, type) == num)
- break;
- }
-
- rcu_read_unlock();
-
-}
-
-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
-{
- /* Calculate count of blocks used by this packet */
- return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
-}
-
-static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
-{
- if (!test_bit(HCI_RAW, &hdev->flags)) {
- /* ACL tx timeout must be longer than maximum
- * link supervision timeout (40.9 seconds) */
- if (!cnt && time_after(jiffies, hdev->acl_last_tx +
- msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
- hci_link_tx_to(hdev, ACL_LINK);
- }
-}
-
-static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
-{
- unsigned int cnt = hdev->acl_cnt;
- struct hci_chan *chan;
- struct sk_buff *skb;
- int quote;
-
- __check_timeout(hdev, cnt);
-
- while (hdev->acl_cnt &&
- (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
- u32 priority = (skb_peek(&chan->data_q))->priority;
- while (quote-- && (skb = skb_peek(&chan->data_q))) {
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
-
- /* Stop if priority has changed */
- if (skb->priority < priority)
- break;
-
- skb = skb_dequeue(&chan->data_q);
-
- hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
-
- hci_send_frame(skb);
- hdev->acl_last_tx = jiffies;
-
- hdev->acl_cnt--;
- chan->sent++;
- chan->conn->sent++;
- }
- }
-
- if (cnt != hdev->acl_cnt)
- hci_prio_recalculate(hdev, ACL_LINK);
-}
-
-static inline void hci_sched_acl_blk(struct hci_dev *hdev)
-{
- unsigned int cnt = hdev->block_cnt;
- struct hci_chan *chan;
- struct sk_buff *skb;
- int quote;
-
- __check_timeout(hdev, cnt);
-
- while (hdev->block_cnt > 0 &&
- (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
- u32 priority = (skb_peek(&chan->data_q))->priority;
- while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
- int blocks;
-
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
-
- /* Stop if priority has changed */
- if (skb->priority < priority)
- break;
-
- skb = skb_dequeue(&chan->data_q);
-
- blocks = __get_blocks(hdev, skb);
- if (blocks > hdev->block_cnt)
- return;
-
- hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
-
- hci_send_frame(skb);
- hdev->acl_last_tx = jiffies;
-
- hdev->block_cnt -= blocks;
- quote -= blocks;
-
- chan->sent += blocks;
- chan->conn->sent += blocks;
- }
- }
-
- if (cnt != hdev->block_cnt)
- hci_prio_recalculate(hdev, ACL_LINK);
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+ int quote;
+
BT_DBG("%s", hdev->name);
- if (!hci_conn_num(hdev, ACL_LINK))
- return;
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
+ /* ACL tx timeout must be longer than maximum
+ * link supervision timeout (40.9 seconds) */
+ if (hdev->acl_cnt <= 0 &&
+ time_after(jiffies, hdev->acl_last_tx + HZ * 45))
+ hci_link_tx_to(hdev, ACL_LINK);
+ }
- switch (hdev->flow_ctl_mode) {
- case HCI_FLOW_CTL_MODE_PACKET_BASED:
- hci_sched_acl_pkt(hdev);
- break;
+ while (hdev->acl_cnt > 0 &&
+ (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
+ while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
+ int count = 1;
- case HCI_FLOW_CTL_MODE_BLOCK_BASED:
- hci_sched_acl_blk(hdev);
- break;
+ BT_DBG("skb %p len %d", skb, skb->len);
+
+ if (hdev->flow_ctl_mode ==
+ HCI_BLOCK_BASED_FLOW_CTL_MODE)
+ /* Calculate count of blocks used by
+ * this packet
+ */
+ count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
+ hdev->data_block_len) + 1;
+
+ if (count > hdev->acl_cnt)
+ return;
+
+ hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+
+ hci_send_frame(skb);
+ hdev->acl_last_tx = jiffies;
+
+ hdev->acl_cnt -= count;
+ quote -= count;
+
+ conn->sent += count;
+ }
}
}
@@ -2646,9 +2256,6 @@
BT_DBG("%s", hdev->name);
- if (!hci_conn_num(hdev, SCO_LINK))
- return;
-
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2669,9 +2276,6 @@
BT_DBG("%s", hdev->name);
- if (!hci_conn_num(hdev, ESCO_LINK))
- return;
-
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2686,15 +2290,12 @@
static inline void hci_sched_le(struct hci_dev *hdev)
{
- struct hci_chan *chan;
+ struct hci_conn *conn;
struct sk_buff *skb;
- int quote, cnt, tmp;
+ int quote, cnt;
BT_DBG("%s", hdev->name);
- if (!hci_conn_num(hdev, LE_LINK))
- return;
-
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
@@ -2704,42 +2305,30 @@
}
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
- tmp = cnt;
- while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
- u32 priority = (skb_peek(&chan->data_q))->priority;
- while (quote-- && (skb = skb_peek(&chan->data_q))) {
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
-
- /* Stop if priority has changed */
- if (skb->priority < priority)
- break;
-
- skb = skb_dequeue(&chan->data_q);
+ while (cnt && (conn = hci_low_sent(hdev, LE_LINK, "e))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
hdev->le_last_tx = jiffies;
cnt--;
- chan->sent++;
- chan->conn->sent++;
+ conn->sent++;
}
}
-
if (hdev->le_pkts)
hdev->le_cnt = cnt;
else
hdev->acl_cnt = cnt;
-
- if (cnt != tmp)
- hci_prio_recalculate(hdev, LE_LINK);
}
-static void hci_tx_work(struct work_struct *work)
+static void hci_tx_task(unsigned long arg)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
+ struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
+ read_lock(&hci_task_lock);
+
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
@@ -2756,9 +2345,11 @@
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
+
+ read_unlock(&hci_task_lock);
}
-/* ----- HCI RX task (incoming data processing) ----- */
+/* ----- HCI RX task (incoming data proccessing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2782,19 +2373,16 @@
hci_dev_unlock(hdev);
if (conn) {
- hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
+ register struct hci_proto *hp;
- hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0,
- conn->dev_class);
- hci_dev_unlock(hdev);
+ hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
/* Send to upper protocol */
- l2cap_recv_acldata(conn, skb, flags);
- return;
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->recv_acldata) {
+ hp->recv_acldata(conn, skb, flags);
+ return;
+ }
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
hdev->name, handle);
@@ -2823,9 +2411,14 @@
hci_dev_unlock(hdev);
if (conn) {
+ register struct hci_proto *hp;
+
/* Send to upper protocol */
- sco_recv_scodata(conn, skb);
- return;
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->recv_scodata) {
+ hp->recv_scodata(conn, skb);
+ return;
+ }
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
hdev->name, handle);
@@ -2834,20 +2427,19 @@
kfree_skb(skb);
}
-static void hci_rx_work(struct work_struct *work)
+static void hci_rx_task(unsigned long arg)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
+ struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
- while ((skb = skb_dequeue(&hdev->rx_q))) {
- /* Send copy to monitor */
- hci_send_to_monitor(hdev, skb);
+ read_lock(&hci_task_lock);
+ while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
}
if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -2868,7 +2460,6 @@
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
- BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
break;
@@ -2887,11 +2478,13 @@
break;
}
}
+
+ read_unlock(&hci_task_lock);
}
-static void hci_cmd_work(struct work_struct *work)
+static void hci_cmd_task(unsigned long arg)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
+ struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2908,44 +2501,14 @@
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
- if (test_bit(HCI_RESET, &hdev->flags))
- del_timer(&hdev->cmd_timer);
- else
- mod_timer(&hdev->cmd_timer,
+ mod_timer(&hdev->cmd_timer,
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ tasklet_schedule(&hdev->cmd_task);
}
}
}
-int hci_do_inquiry(struct hci_dev *hdev, u8 length)
-{
- /* General inquiry access code (GIAC) */
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_INQUIRY, &hdev->flags))
- return -EINPROGRESS;
-
- inquiry_cache_flush(hdev);
-
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, sizeof(cp.lap));
- cp.length = length;
-
- return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
-}
-
-int hci_cancel_inquiry(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
-
- if (!test_bit(HCI_INQUIRY, &hdev->flags))
- return -EPERM;
-
- return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-}
+module_param(enable_smp, bool, 0644);
+MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
old mode 100755
new mode 100644
index 626318c..2b14423
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+ Copyright (c) 2000-2001, 2010-2012, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -35,8 +35,10 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
@@ -51,19 +53,11 @@
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status) {
- hci_dev_lock(hdev);
- mgmt_stop_discovery_failed(hdev, status);
- hci_dev_unlock(hdev);
+ if (status)
return;
- }
clear_bit(HCI_INQUIRY, &hdev->flags);
- hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- hci_dev_unlock(hdev);
-
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
hci_conn_check_pending(hdev);
@@ -78,9 +72,36 @@
if (status)
return;
+ clear_bit(HCI_INQUIRY, &hdev->flags);
+
hci_conn_check_pending(hdev);
}
+static void hci_cc_link_key_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_link_key_reply *rp = (void *) skb->data;
+ struct hci_conn *conn;
+ struct hci_cp_link_key_reply *cp;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ if (rp->status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LINK_KEY_REPLY);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn) {
+ hci_conn_hold(conn);
+ memcpy(conn->link_key, cp->link_key, sizeof(conn->link_key));
+ conn->key_type = 5;
+ hci_conn_put(conn);
+ }
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
@@ -190,11 +211,6 @@
clear_bit(HCI_RESET, &hdev->flags);
hci_req_complete(hdev, HCI_OP_RESET, status);
-
- /* Reset all non-persistent flags */
- hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
-
- hdev->discovery.state = DISCOVERY_STOPPED;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -207,17 +223,13 @@
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
return;
-
hci_dev_lock(hdev);
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_set_local_name_complete(hdev, sent, status);
- else if (!status)
+ if (!status)
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_set_local_name_complete(hdev->id, sent, status);
hci_dev_unlock(hdev);
-
- hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -229,8 +241,7 @@
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
- memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
+ memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -253,9 +264,6 @@
clear_bit(HCI_AUTH, &hdev->flags);
}
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_auth_enable_complete(hdev, status);
-
hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
@@ -284,8 +292,7 @@
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- __u8 param, status = *((__u8 *) skb->data);
- int old_pscan, old_iscan;
+ __u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -294,40 +301,30 @@
if (!sent)
return;
- param = *((__u8 *) sent);
+ if (!status) {
+ __u8 param = *((__u8 *) sent);
+ int old_pscan, old_iscan;
+ hci_dev_lock(hdev);
- hci_dev_lock(hdev);
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
- if (status != 0) {
- mgmt_write_scan_failed(hdev, param, status);
- hdev->discov_timeout = 0;
- goto done;
+ if (param & SCAN_INQUIRY) {
+ set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev->id, 1);
+ } else if (old_iscan)
+ mgmt_discoverable(hdev->id, 0);
+
+ if (param & SCAN_PAGE) {
+ set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev->id, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev->id, 0);
+ hci_dev_unlock(hdev);
}
- old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
- old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
-
- if (param & SCAN_INQUIRY) {
- set_bit(HCI_ISCAN, &hdev->flags);
- if (!old_iscan)
- mgmt_discoverable(hdev, 1);
- if (hdev->discov_timeout > 0) {
- int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
- queue_delayed_work(hdev->workqueue, &hdev->discov_off,
- to);
- }
- } else if (old_iscan)
- mgmt_discoverable(hdev, 0);
-
- if (param & SCAN_PAGE) {
- set_bit(HCI_PSCAN, &hdev->flags);
- if (!old_pscan)
- mgmt_connectable(hdev, 1);
- } else if (old_pscan)
- mgmt_connectable(hdev, 0);
-
-done:
- hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
@@ -353,19 +350,14 @@
BT_DBG("%s status 0x%x", hdev->name, status);
+ if (status)
+ return;
+
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
return;
- hci_dev_lock(hdev);
-
- if (status == 0)
- memcpy(hdev->dev_class, sent, 3);
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_set_class_of_dev_complete(hdev, sent, status);
-
- hci_dev_unlock(hdev);
+ memcpy(hdev->dev_class, sent, 3);
}
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -387,8 +379,11 @@
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
- if (hdev->notify)
+ if (hdev->notify) {
+ tasklet_disable(&hdev->tx_task);
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
+ tasklet_enable(&hdev->tx_task);
+ }
}
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -415,8 +410,11 @@
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
- if (hdev->notify)
+ if (hdev->notify) {
+ tasklet_disable(&hdev->tx_task);
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
+ tasklet_enable(&hdev->tx_task);
+ }
}
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -428,6 +426,18 @@
hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
+static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->ssp_mode = rp->mode;
+}
+
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -435,18 +445,14 @@
BT_DBG("%s status 0x%x", hdev->name, status);
+ if (status)
+ return;
+
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
return;
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
- else if (!status) {
- if (*((u8 *) sent))
- set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
- else
- clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
- }
+ hdev->ssp_mode = *((__u8 *) sent);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
@@ -493,16 +499,16 @@
* command otherwise */
u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
- /* CSR 1.1 dongles does not accept any bitfield so don't try to set
- * any event mask for pre 1.2 devices */
- if (hdev->hci_ver < BLUETOOTH_VER_1_2)
- return;
+ BT_DBG("");
- events[4] |= 0x01; /* Flow Specification Complete */
- events[4] |= 0x02; /* Inquiry Result with RSSI */
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
+ /* Events for 1.2 and newer controllers */
+ if (hdev->lmp_ver > 1) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
@@ -543,27 +549,12 @@
static void hci_setup(struct hci_dev *hdev)
{
- if (hdev->dev_type != HCI_BREDR)
- return;
-
- hci_setup_event_mask(hdev);
-
- if (hdev->hci_ver > BLUETOOTH_VER_1_1)
+ if (hdev->lmp_ver > 1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
- u8 mode = 0x01;
- hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
- sizeof(mode), &mode);
- } else {
- struct hci_cp_write_eir cp;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
- memset(&cp, 0, sizeof(cp));
-
- hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
- }
+ u8 mode = 0x01;
+ hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
}
if (hdev->features[3] & LMP_RSSI_INQ)
@@ -571,20 +562,6 @@
if (hdev->features[7] & LMP_INQ_TX_PWR)
hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
-
- if (hdev->features[7] & LMP_EXTFEATURES) {
- struct hci_cp_read_local_ext_features cp;
-
- cp.page = 0x01;
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
- &cp);
- }
-
- if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
- u8 enable = 1;
- hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
- &enable);
- }
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -594,7 +571,7 @@
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
- goto done;
+ return;
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
@@ -606,11 +583,8 @@
hdev->manufacturer,
hdev->hci_ver, hdev->hci_rev);
- if (test_bit(HCI_INIT, &hdev->flags))
+ if (hdev->dev_type == HCI_BREDR && test_bit(HCI_INIT, &hdev->flags))
hci_setup(hdev);
-
-done:
- hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}
static void hci_setup_link_policy(struct hci_dev *hdev)
@@ -627,8 +601,8 @@
link_policy |= HCI_LP_PARK;
link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
- &link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ sizeof(link_policy), &link_policy);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -660,6 +634,23 @@
memcpy(hdev->features, rp->features, 8);
+ if (hdev->dev_type == HCI_BREDR && test_bit(HCI_INIT, &hdev->flags)) {
+ if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ u8 mode = 0x01;
+ hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode);
+ }
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ hci_setup_inquiry_mode(hdev);
+
+ if (hdev->features[7] & LMP_INQ_TX_PWR)
+ hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
+ 0, NULL);
+
+ hci_setup_event_mask(hdev);
+ }
+
/* Adjust default settings according to features
* supported by device. */
@@ -704,50 +695,8 @@
hdev->features[6], hdev->features[7]);
}
-static void hci_set_le_support(struct hci_dev *hdev)
-{
- struct hci_cp_write_le_host_supported cp;
-
- memset(&cp, 0, sizeof(cp));
-
- if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
- cp.le = 1;
- cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
- }
-
- if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
- hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
- &cp);
-}
-
-static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
- if (rp->status)
- goto done;
-
- switch (rp->page) {
- case 0:
- memcpy(hdev->features, rp->features, 8);
- break;
- case 1:
- memcpy(hdev->host_features, rp->features, 8);
- break;
- }
-
- if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
- hci_set_le_support(hdev);
-
-done:
- hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
-}
-
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
@@ -757,8 +706,6 @@
return;
hdev->flow_ctl_mode = rp->mode;
-
- hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
}
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -770,18 +717,20 @@
if (rp->status)
return;
- hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
- hdev->sco_mtu = rp->sco_mtu;
- hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
- hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
+ if (hdev->flow_ctl_mode == HCI_PACKET_BASED_FLOW_CTL_MODE) {
+ hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
+ hdev->sco_mtu = rp->sco_mtu;
+ hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
+ hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
+ hdev->acl_cnt = hdev->acl_pkts;
+ hdev->sco_cnt = hdev->sco_pkts;
+ }
if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
hdev->sco_mtu = 64;
hdev->sco_pkts = 8;
}
- hdev->acl_cnt = hdev->acl_pkts;
- hdev->sco_cnt = hdev->sco_pkts;
BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
hdev->acl_mtu, hdev->acl_pkts,
@@ -800,28 +749,6 @@
hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}
-static void hci_cc_read_data_block_size(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_read_data_block_size *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
- if (rp->status)
- return;
-
- hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
- hdev->block_len = __le16_to_cpu(rp->block_len);
- hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
-
- hdev->block_cnt = hdev->num_blocks;
-
- BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
- hdev->block_cnt, hdev->block_len);
-
- hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
-}
-
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -831,8 +758,33 @@
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ if (hdev->flow_ctl_mode == HCI_BLOCK_BASED_FLOW_CTL_MODE) {
+ hdev->acl_mtu = __le16_to_cpu(rp->max_acl_len);
+ hdev->sco_mtu = 0;
+ hdev->data_block_len = __le16_to_cpu(rp->data_block_len);
+ /* acl_pkts indicates the number of blocks */
+ hdev->acl_pkts = __le16_to_cpu(rp->num_blocks);
+ hdev->sco_pkts = 0;
+ hdev->acl_cnt = hdev->acl_pkts;
+ hdev->sco_cnt = 0;
+ }
+
+ BT_DBG("%s acl mtu %d:%d, data block len %d", hdev->name,
+ hdev->acl_mtu, hdev->acl_cnt, hdev->data_block_len);
+}
+
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
@@ -910,11 +862,10 @@
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
if (rp->status != 0)
goto unlock;
@@ -926,7 +877,6 @@
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (conn)
conn->pin_length = cp->pin_len;
-
unlock:
hci_dev_unlock(hdev);
}
@@ -936,16 +886,13 @@
struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
rp->status);
-
hci_dev_unlock(hdev);
}
-
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -971,13 +918,11 @@
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
- rp->status);
-
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+ rp->status);
hci_dev_unlock(hdev);
}
@@ -987,45 +932,27 @@
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
- ACL_LINK, 0, rp->status);
-
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+ rp->status);
hci_dev_unlock(hdev);
}
-static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+ struct hci_conn *conn;
+ struct hci_rp_read_rssi *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
- hci_dev_lock(hdev);
+ BT_DBG("%s rssi : %d handle : %d", hdev->name, rp->rssi, rp->handle);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
- 0, rp->status);
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
- hci_dev_lock(hdev);
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
- ACL_LINK, 0, rp->status);
-
- hci_dev_unlock(hdev);
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
+ if (conn)
+ mgmt_read_rssi_complete(hdev->id, rp->rssi, &conn->dst,
+ __le16_to_cpu(rp->handle), rp->status);
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -1034,86 +961,13 @@
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
hci_dev_lock(hdev);
- mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
+
+ mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
rp->randomizer, rp->status);
hci_dev_unlock(hdev);
}
-static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
-{
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
-
- if (status) {
- hci_dev_lock(hdev);
- mgmt_start_discovery_failed(hdev, status);
- hci_dev_unlock(hdev);
- return;
- }
-}
-
-static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_cp_le_set_scan_enable *cp;
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
- if (!cp)
- return;
-
- switch (cp->enable) {
- case LE_SCANNING_ENABLED:
- hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
-
- if (status) {
- hci_dev_lock(hdev);
- mgmt_start_discovery_failed(hdev, status);
- hci_dev_unlock(hdev);
- return;
- }
-
- set_bit(HCI_LE_SCAN, &hdev->dev_flags);
-
- cancel_delayed_work_sync(&hdev->adv_work);
-
- hci_dev_lock(hdev);
- hci_adv_entries_clear(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
- hci_dev_unlock(hdev);
- break;
-
- case LE_SCANNING_DISABLED:
- if (status)
- return;
-
- clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
-
- schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
-
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
- mgmt_interleaved_discovery(hdev);
- } else {
- hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- hci_dev_unlock(hdev);
- }
-
- break;
-
- default:
- BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
- break;
- }
-}
-
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
@@ -1138,30 +992,26 @@
hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
-static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_cp_write_le_host_supported *sent;
+ void *sent;
+ __u8 param_scan_enable;
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ if (status)
+ return;
- sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!sent)
return;
- if (!status) {
- if (sent->le)
- hdev->host_features[0] |= LMP_HOST_LE;
- else
- hdev->host_features[0] &= ~LMP_HOST_LE;
+ param_scan_enable = *((__u8 *) sent);
+ if (param_scan_enable == 0x01) {
+ del_timer(&hdev->adv_timer);
+ } else if (param_scan_enable == 0x00) {
+ mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
}
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_bit(HCI_INIT, &hdev->flags))
- mgmt_le_enable_complete(hdev, sent->le, status);
-
- hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -1170,19 +1020,15 @@
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
+
hci_conn_check_pending(hdev);
+ } else {
+ set_bit(HCI_INQUIRY, &hdev->flags);
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_start_discovery_failed(hdev, status);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_inquiry_started(hdev->id);
hci_dev_unlock(hdev);
- return;
}
-
- set_bit(HCI_INQUIRY, &hdev->flags);
-
- hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
- hci_dev_unlock(hdev);
}
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1215,7 +1061,7 @@
if (!conn) {
conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
if (conn) {
- conn->out = true;
+ conn->out = 1;
conn->link_mode |= HCI_LM_MASTER;
} else
BT_ERR("No memory for new connection");
@@ -1267,9 +1113,6 @@
BT_DBG("%s status 0x%x", hdev->name, status);
- if (!status)
- return;
-
cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
if (!cp)
return;
@@ -1278,10 +1121,27 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- if (conn->state == BT_CONFIG) {
- hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ if (status) {
+ mgmt_auth_failed(hdev->id, &conn->dst, status);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+
+ if (conn->state == BT_CONFIG) {
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, status);
+ hci_conn_put(conn);
+ } else {
+ hci_auth_cfm(conn, status);
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_put(conn);
+ }
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ hci_encrypt_cfm(conn, status, 0x00);
+ }
}
+ conn->auth_initiator = 1;
}
hci_dev_unlock(hdev);
@@ -1324,82 +1184,15 @@
return 0;
/* Only request authentication for SSP connections or non-SSP
- * devices with sec_level HIGH or if MITM protection is requested */
- if (!hci_conn_ssp_enabled(conn) &&
- conn->pending_sec_level != BT_SECURITY_HIGH &&
- !(conn->auth_type & 0x01))
+ * devices with sec_level >= BT_SECURITY_MEDIUM*/
+ BT_DBG("Pending sec level is %d", conn->pending_sec_level);
+ if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
+ conn->pending_sec_level < BT_SECURITY_MEDIUM)
return 0;
return 1;
}
-static inline int hci_resolve_name(struct hci_dev *hdev,
- struct inquiry_entry *e)
-{
- struct hci_cp_remote_name_req cp;
-
- memset(&cp, 0, sizeof(cp));
-
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- cp.pscan_rep_mode = e->data.pscan_rep_mode;
- cp.pscan_mode = e->data.pscan_mode;
- cp.clock_offset = e->data.clock_offset;
-
- return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
-}
-
-static bool hci_resolve_next_name(struct hci_dev *hdev)
-{
- struct discovery_state *discov = &hdev->discovery;
- struct inquiry_entry *e;
-
- if (list_empty(&discov->resolve))
- return false;
-
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
- if (hci_resolve_name(hdev, e) == 0) {
- e->name_state = NAME_PENDING;
- return true;
- }
-
- return false;
-}
-
-static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
- bdaddr_t *bdaddr, u8 *name, u8 name_len)
-{
- struct discovery_state *discov = &hdev->discovery;
- struct inquiry_entry *e;
-
- if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
- name_len, conn->dev_class);
-
- if (discov->state == DISCOVERY_STOPPED)
- return;
-
- if (discov->state == DISCOVERY_STOPPING)
- goto discov_complete;
-
- if (discov->state != DISCOVERY_RESOLVING)
- return;
-
- e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
- if (e) {
- e->name_state = NAME_KNOWN;
- list_del(&e->list);
- if (name)
- mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
- e->data.rssi, name, name_len);
- }
-
- if (hci_resolve_next_name(hdev))
- return;
-
-discov_complete:
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-}
-
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_remote_name_req *cp;
@@ -1419,23 +1212,12 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
-
- if (!conn)
- goto unlock;
-
- if (!hci_outgoing_auth_needed(hdev, conn))
- goto unlock;
-
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ if (conn && hci_outgoing_auth_needed(hdev, conn)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -1546,9 +1328,9 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, status);
}
@@ -1573,41 +1355,20 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, status);
}
hci_dev_unlock(hdev);
}
-static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
-{
- struct hci_cp_disconnect *cp;
- struct hci_conn *conn;
-
- if (!status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
- if (conn)
- mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
-
- hci_dev_unlock(hdev);
-}
-
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_le_create_conn *cp;
struct hci_conn *conn;
+ unsigned long exp = msecs_to_jiffies(5000);
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -1630,14 +1391,148 @@
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
- if (conn) {
- conn->dst_type = cp->peer_addr_type;
- conn->out = true;
- } else {
+ conn = hci_le_conn_add(hdev, &cp->peer_addr,
+ cp->peer_addr_type);
+ if (conn)
+ conn->out = 1;
+ else
BT_ERR("No memory for new connection");
- }
+ } else
+ exp = msecs_to_jiffies(conn->conn_timeout * 1000);
+
+ if (conn && exp)
+ mod_timer(&conn->disc_timer, jiffies + exp);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_accept_logical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_create_logical_link *ap;
+ struct hci_chan *chan;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ ap = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_LOGICAL_LINK);
+ if (!ap)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_id(hdev, ap->phy_handle);
+
+ BT_DBG("%s chan %p", hdev->name, chan);
+
+ if (status) {
+ if (chan && chan->state == BT_CONNECT) {
+ chan->state = BT_CLOSED;
+ hci_proto_create_cfm(chan, status);
}
+ } else if (chan) {
+ chan->state = BT_CONNECT2;
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_create_logical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_create_logical_link *cp;
+ struct hci_chan *chan;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_LOGICAL_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_id(hdev, cp->phy_handle);
+
+ BT_DBG("%s chan %p", hdev->name, chan);
+
+ if (status) {
+ if (chan && chan->state == BT_CONNECT) {
+ chan->state = BT_CLOSED;
+ hci_proto_create_cfm(chan, status);
+ }
+ } else if (chan)
+ chan->state = BT_CONNECT2;
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_flow_spec_modify(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_flow_spec_modify *cp;
+ struct hci_chan *chan;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_FLOW_SPEC_MODIFY);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, cp->log_handle);
+ if (chan) {
+ if (status)
+ hci_proto_modify_cfm(chan, status);
+ else {
+ chan->tx_fs = cp->tx_fs;
+ chan->rx_fs = cp->rx_fs;
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_disconn_logical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_disconn_logical_link *cp;
+ struct hci_chan *chan;
+
+ if (!status)
+ return;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONN_LOGICAL_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, cp->log_handle);
+ if (chan)
+ hci_chan_del(chan);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_disconn_physical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_disconn_phys_link *cp;
+ struct hci_conn *conn;
+
+ if (!status)
+ return;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONN_PHYS_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+ if (conn) {
+ conn->state = BT_CLOSED;
+ hci_conn_del(conn);
}
hci_dev_unlock(hdev);
@@ -1651,41 +1546,21 @@
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- struct discovery_state *discov = &hdev->discovery;
- struct inquiry_entry *e;
BT_DBG("%s status %d", hdev->name, status);
+ if (!hdev->disco_state)
+ clear_bit(HCI_INQUIRY, &hdev->flags);
+
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
-
- hci_conn_check_pending(hdev);
-
- if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
- return;
-
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
- return;
-
hci_dev_lock(hdev);
- if (discov->state != DISCOVERY_FINDING)
- goto unlock;
-
- if (list_empty(&discov->resolve)) {
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
- }
-
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
- if (e && hci_resolve_name(hdev, e) == 0) {
- e->name_state = NAME_PENDING;
- hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
- } else {
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- }
-
-unlock:
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_inquiry_complete_evt(hdev->id, status);
hci_dev_unlock(hdev);
+
+ if (!lmp_le_capable(hdev))
+ hci_conn_check_pending(hdev);
}
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1702,8 +1577,6 @@
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
- bool name_known, ssp;
-
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -1712,11 +1585,9 @@
data.clock_offset = info->clock_offset;
data.rssi = 0x00;
data.ssp_mode = 0x00;
-
- name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
- mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, 0, !name_known, ssp, NULL,
- 0);
+ hci_inquiry_cache_update(hdev, &data);
+ mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
+ info->dev_class, 0, 0, NULL);
}
hci_dev_unlock(hdev);
@@ -1750,6 +1621,11 @@
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr, 0);
+ } else if (conn->type == LE_LINK) {
+ conn->state = BT_CONNECTED;
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr, 1);
} else
conn->state = BT_CONNECTED;
@@ -1762,27 +1638,26 @@
if (test_bit(HCI_ENCRYPT, &hdev->flags))
conn->link_mode |= HCI_LM_ENCRYPT;
- /* Get remote features */
+ /* Get remote version */
if (conn->type == ACL_LINK) {
- struct hci_cp_read_remote_features cp;
+ struct hci_cp_read_remote_version cp;
cp.handle = ev->handle;
- hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_READ_REMOTE_VERSION,
+ sizeof(cp), &cp);
}
/* Set packet type for incoming connection */
- if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
+ if (!conn->out && hdev->hci_ver < 3) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
- &cp);
+ hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
+ sizeof(cp), &cp);
}
} else {
conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK)
- mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
- conn->dst_type, ev->status);
+ if (conn->type == ACL_LINK || conn->type == LE_LINK)
+ mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
}
if (conn->type == ACL_LINK)
@@ -1843,6 +1718,8 @@
}
memcpy(conn->dev_class, ev->dev_class, 3);
+ /* For incoming connection update remote class to userspace */
+ mgmt_remote_class(hdev->id, &ev->bdaddr, ev->dev_class);
conn->state = BT_CONNECT;
hci_dev_unlock(hdev);
@@ -1858,8 +1735,8 @@
else
cp.role = 0x01; /* Remain slave */
- hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
- &cp);
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
+ sizeof(cp), &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
@@ -1868,19 +1745,19 @@
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.max_latency = cpu_to_le16(0x000A);
cp.content_format = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.retrans_effort = 0x01;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
} else {
/* Connection rejected */
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = HCI_ERROR_REJ_BAD_ADDR;
+ cp.reason = 0x0f;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
@@ -1892,31 +1769,29 @@
BT_DBG("%s status %d", hdev->name, ev->status);
+ if (ev->status) {
+ hci_dev_lock(hdev);
+ mgmt_disconnect_failed(hdev->id);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
- if (ev->status == 0)
- conn->state = BT_CLOSED;
+ conn->state = BT_CLOSED;
- if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
- (conn->type == ACL_LINK || conn->type == LE_LINK)) {
- if (ev->status != 0)
- mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, ev->status);
- else
- mgmt_device_disconnected(hdev, &conn->dst, conn->type,
- conn->dst_type);
- }
+ if (conn->type == ACL_LINK || conn->type == LE_LINK)
+ mgmt_disconnected(hdev->id, &conn->dst);
- if (ev->status == 0) {
- if (conn->type == ACL_LINK && conn->flush_key)
- hci_remove_link_key(hdev, &conn->dst);
- hci_proto_disconn_cfm(conn, ev->reason);
- hci_conn_del(conn);
- }
+ if (conn->type == LE_LINK)
+ del_timer(&conn->smp_timer);
+
+ hci_proto_disconn_cfm(conn, ev->reason, 0);
+ hci_conn_del(conn);
unlock:
hci_dev_unlock(hdev);
@@ -1932,59 +1807,91 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (!conn)
- goto unlock;
+ if (conn) {
+ if (ev->status == 0x06 && hdev->ssp_mode > 0 &&
+ conn->ssp_mode > 0) {
+ struct hci_cp_auth_requested cp;
+ hci_remove_link_key(hdev, &conn->dst);
+ cp.handle = cpu_to_le16(conn->handle);
+ /*Initiates dedicated bonding as pin or key is missing
+ on remote device*/
+ /*In case if remote device is ssp supported,
+ reduce the security level to MEDIUM if it is HIGH*/
+ if (conn->ssp_mode && conn->auth_initiator &&
+ conn->io_capability != 0x03) {
+ conn->pending_sec_level = BT_SECURITY_HIGH;
+ conn->auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+ }
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+ hci_dev_unlock(hdev);
+ BT_INFO("Pin or key missing");
+ return;
+ }
- if (!ev->status) {
- if (!hci_conn_ssp_enabled(conn) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
- BT_INFO("re-auth of legacy device is not possible.");
- } else {
+ if (!ev->status) {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
- }
- } else {
- mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
- ev->status);
- }
-
- clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
- clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
-
- if (conn->state == BT_CONFIG) {
- if (!ev->status && hci_conn_ssp_enabled(conn)) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
} else {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
+ mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ conn->sec_level = BT_SECURITY_LOW;
+ }
+
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hdev->ssp_mode > 0 &&
+ conn->ssp_mode > 0) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ } else {
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_put(conn);
+ }
+ } else {
+ hci_auth_cfm(conn, ev->status);
+
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_put(conn);
}
- } else {
- hci_auth_cfm(conn, ev->status);
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_put(conn);
- }
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ if (!ev->status) {
+ if (conn->link_mode & HCI_LM_ENCRYPT) {
+ /* Encryption implies authentication */
+ conn->link_mode |= HCI_LM_AUTH;
+ conn->link_mode |= HCI_LM_ENCRYPT;
+ conn->sec_level =
+ conn->pending_sec_level;
+ clear_bit(HCI_CONN_ENCRYPT_PEND,
+ &conn->pend);
+ hci_encrypt_cfm(conn, ev->status, 1);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
- if (!ev->status) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
- } else {
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
- hci_encrypt_cfm(conn, ev->status, 0x00);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_encrypt_change(hdev->id,
+ &conn->dst,
+ ev->status);
+
+ } else {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev,
+ HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ }
+ } else {
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ hci_encrypt_cfm(conn, ev->status, 0x00);
+ }
}
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -1999,31 +1906,16 @@
hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_remote_name(hdev->id, &ev->bdaddr, ev->status, ev->name);
+
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
- goto check_auth;
-
- if (ev->status == 0)
- hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
- strnlen(ev->name, HCI_MAX_NAME_LENGTH));
- else
- hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
-
-check_auth:
- if (!conn)
- goto unlock;
-
- if (!hci_outgoing_auth_needed(hdev, conn))
- goto unlock;
-
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ if (conn && hci_outgoing_auth_needed(hdev, conn)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -2048,13 +1940,7 @@
conn->link_mode &= ~HCI_LM_ENCRYPT;
}
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
-
- if (ev->status && conn->state == BT_CONNECTED) {
- hci_acl_disconn(conn, 0x13);
- hci_conn_put(conn);
- goto unlock;
- }
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
if (conn->state == BT_CONFIG) {
if (!ev->status)
@@ -2062,11 +1948,30 @@
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
- } else
- hci_encrypt_cfm(conn, ev->status, ev->encrypt);
+ } else {
+ /*
+ * If the remote device does not support
+ * Pause Encryption, usually during the
+ * roleSwitch we see Encryption disable
+ * for short duration. Allow remote device
+ * to disable encryption
+ * for short duration in this case.
+ */
+ if ((ev->encrypt == 0) && (ev->status == 0) &&
+ ((conn->features[5] & LMP_PAUSE_ENC) == 0)) {
+ mod_timer(&conn->encrypt_pause_timer,
+ jiffies + msecs_to_jiffies(500));
+ BT_INFO("enc pause timer, enc_pend_flag set");
+ } else {
+ del_timer(&conn->encrypt_pause_timer);
+ hci_encrypt_cfm(conn, ev->status, ev->encrypt);
+ }
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_encrypt_change(hdev->id, &conn->dst, ev->status);
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -2084,7 +1989,7 @@
if (!ev->status)
conn->link_mode |= HCI_LM_SECURE;
- clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
hci_key_change_cfm(conn, ev->status);
}
@@ -2105,8 +2010,10 @@
if (!conn)
goto unlock;
- if (!ev->status)
+ if (!ev->status) {
memcpy(conn->features, ev->features, 8);
+ mgmt_remote_features(hdev->id, &conn->dst, ev->features);
+ }
if (conn->state != BT_CONFIG)
goto unlock;
@@ -2118,18 +2025,18 @@
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
sizeof(cp), &cp);
goto unlock;
+ } else if (!(lmp_ssp_capable(conn)) && conn->auth_initiator &&
+ (conn->pending_sec_level == BT_SECURITY_HIGH)) {
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
}
- if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
+ if (!ev->status) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0,
- conn->dev_class);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -2143,7 +2050,24 @@
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
- BT_DBG("%s", hdev->name);
+ struct hci_ev_remote_version *ev = (void *) skb->data;
+ struct hci_cp_read_remote_features cp;
+ struct hci_conn *conn;
+ BT_DBG("%s status %d", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+ cp.handle = ev->handle;
+ hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
+ sizeof(cp), &cp);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+ if (!conn)
+ goto unlock;
+ if (!ev->status)
+ mgmt_remote_version(hdev->id, &conn->dst, ev->lmp_ver,
+ ev->manufacturer, ev->lmp_subver);
+unlock:
+ hci_dev_unlock(hdev);
}
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2169,6 +2093,10 @@
hci_cc_exit_periodic_inq(hdev, skb);
break;
+ case HCI_OP_LINK_KEY_REPLY:
+ hci_cc_link_key_reply(hdev, skb);
+ break;
+
case HCI_OP_REMOTE_NAME_REQ_CANCEL:
hci_cc_remote_name_req_cancel(hdev, skb);
break;
@@ -2237,6 +2165,10 @@
hci_cc_host_buffer_size(hdev, skb);
break;
+ case HCI_OP_READ_SSP_MODE:
+ hci_cc_read_ssp_mode(hdev, skb);
+ break;
+
case HCI_OP_WRITE_SSP_MODE:
hci_cc_write_ssp_mode(hdev, skb);
break;
@@ -2253,10 +2185,6 @@
hci_cc_read_local_features(hdev, skb);
break;
- case HCI_OP_READ_LOCAL_EXT_FEATURES:
- hci_cc_read_local_ext_features(hdev, skb);
- break;
-
case HCI_OP_READ_BUFFER_SIZE:
hci_cc_read_buffer_size(hdev, skb);
break;
@@ -2265,10 +2193,6 @@
hci_cc_read_bd_addr(hdev, skb);
break;
- case HCI_OP_READ_DATA_BLOCK_SIZE:
- hci_cc_read_data_block_size(hdev, skb);
- break;
-
case HCI_OP_WRITE_CA_TIMEOUT:
hci_cc_write_ca_timeout(hdev, skb);
break;
@@ -2277,10 +2201,19 @@
hci_cc_read_flow_control_mode(hdev, skb);
break;
+ case HCI_OP_READ_DATA_BLOCK_SIZE:
+ hci_cc_read_data_block_size(hdev, skb);
+ break;
+
case HCI_OP_READ_LOCAL_AMP_INFO:
hci_cc_read_local_amp_info(hdev, skb);
break;
+ case HCI_OP_READ_LOCAL_AMP_ASSOC:
+ case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
+ hci_amp_cmd_complete(hdev, opcode, skb);
+ break;
+
case HCI_OP_DELETE_STORED_LINK_KEY:
hci_cc_delete_stored_link_key(hdev, skb);
break;
@@ -2317,6 +2250,10 @@
hci_cc_le_read_buffer_size(hdev, skb);
break;
+ case HCI_OP_READ_RSSI:
+ hci_cc_read_rssi(hdev, skb);
+ break;
+
case HCI_OP_USER_CONFIRM_REPLY:
hci_cc_user_confirm_reply(hdev, skb);
break;
@@ -2325,22 +2262,6 @@
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
- case HCI_OP_USER_PASSKEY_REPLY:
- hci_cc_user_passkey_reply(hdev, skb);
- break;
-
- case HCI_OP_USER_PASSKEY_NEG_REPLY:
- hci_cc_user_passkey_neg_reply(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_SCAN_PARAM:
- hci_cc_le_set_scan_param(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_SCAN_ENABLE:
- hci_cc_le_set_scan_enable(hdev, skb);
- break;
-
case HCI_OP_LE_LTK_REPLY:
hci_cc_le_ltk_reply(hdev, skb);
break;
@@ -2349,8 +2270,8 @@
hci_cc_le_ltk_neg_reply(hdev, skb);
break;
- case HCI_OP_WRITE_LE_HOST_SUPPORTED:
- hci_cc_write_le_host_supported(hdev, skb);
+ case HCI_OP_LE_SET_SCAN_ENABLE:
+ hci_cc_le_set_scan_enable(hdev, skb);
break;
default:
@@ -2364,7 +2285,7 @@
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ tasklet_schedule(&hdev->cmd_task);
}
}
@@ -2422,8 +2343,33 @@
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
+ case HCI_OP_CREATE_LOGICAL_LINK:
+ hci_cs_create_logical_link(hdev, ev->status);
+ break;
+
+ case HCI_OP_ACCEPT_LOGICAL_LINK:
+ hci_cs_accept_logical_link(hdev, ev->status);
+ break;
+
+ case HCI_OP_DISCONN_LOGICAL_LINK:
+ hci_cs_disconn_logical_link(hdev, ev->status);
+ break;
+
+ case HCI_OP_FLOW_SPEC_MODIFY:
+ hci_cs_flow_spec_modify(hdev, ev->status);
+ break;
+
+ case HCI_OP_CREATE_PHYS_LINK:
+ case HCI_OP_ACCEPT_PHYS_LINK:
+ hci_amp_cmd_status(hdev, opcode, ev->status);
+ break;
+
+ case HCI_OP_DISCONN_PHYS_LINK:
+ hci_cs_disconn_physical_link(hdev, ev->status);
+
case HCI_OP_DISCONNECT:
- hci_cs_disconnect(hdev, ev->status);
+ if (ev->status != 0)
+ mgmt_disconnect_failed(hdev->id);
break;
case HCI_OP_LE_CREATE_CONN:
@@ -2445,7 +2391,7 @@
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ tasklet_schedule(&hdev->cmd_task);
}
}
@@ -2467,7 +2413,7 @@
conn->link_mode |= HCI_LM_MASTER;
}
- clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
+ clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
hci_role_switch_cfm(conn, ev->status, ev->role);
}
@@ -2478,117 +2424,125 @@
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
+ __le16 *ptr;
int i;
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
- BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
- return;
- }
-
- if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
- BT_DBG("%s bad parameters", hdev->name);
- return;
- }
+ skb_pull(skb, sizeof(*ev));
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
- for (i = 0; i < ev->num_hndl; i++) {
- struct hci_comp_pkts_info *info = &ev->handles[i];
- struct hci_conn *conn;
- __u16 handle, count;
-
- handle = __le16_to_cpu(info->handle);
- count = __le16_to_cpu(info->count);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- continue;
-
- conn->sent -= count;
-
- switch (conn->type) {
- case ACL_LINK:
- hdev->acl_cnt += count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- break;
-
- case LE_LINK:
- if (hdev->le_pkts) {
- hdev->le_cnt += count;
- if (hdev->le_cnt > hdev->le_pkts)
- hdev->le_cnt = hdev->le_pkts;
- } else {
- hdev->acl_cnt += count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- }
- break;
-
- case SCO_LINK:
- hdev->sco_cnt += count;
- if (hdev->sco_cnt > hdev->sco_pkts)
- hdev->sco_cnt = hdev->sco_pkts;
- break;
-
- default:
- BT_ERR("Unknown type %d conn %p", conn->type, conn);
- break;
- }
- }
-
- queue_work(hdev->workqueue, &hdev->tx_work);
-}
-
-static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
- int i;
-
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
- BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
- return;
- }
-
- if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+ if (skb->len < ev->num_hndl * 4) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
- BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
- ev->num_hndl);
+ tasklet_disable(&hdev->tx_task);
- for (i = 0; i < ev->num_hndl; i++) {
- struct hci_comp_blocks_info *info = &ev->handles[i];
- struct hci_conn *conn;
- __u16 handle, block_count;
+ for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+ struct hci_conn *conn = NULL;
+ struct hci_chan *chan;
+ __u16 handle, count;
- handle = __le16_to_cpu(info->handle);
- block_count = __le16_to_cpu(info->blocks);
+ handle = get_unaligned_le16(ptr++);
+ count = get_unaligned_le16(ptr++);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- continue;
+ if (hdev->dev_type == HCI_BREDR)
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ else {
+ chan = hci_chan_list_lookup_handle(hdev, handle);
+ if (chan)
+ conn = chan->conn;
+ }
+ if (conn) {
+ conn->sent -= count;
- conn->sent -= block_count;
-
- switch (conn->type) {
- case ACL_LINK:
- hdev->block_cnt += block_count;
- if (hdev->block_cnt > hdev->num_blocks)
- hdev->block_cnt = hdev->num_blocks;
- break;
-
- default:
- BT_ERR("Unknown type %d conn %p", conn->type, conn);
- break;
+ if (conn->type == ACL_LINK) {
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ } else if (conn->type == LE_LINK) {
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ }
+ } else {
+ hdev->sco_cnt += count;
+ if (hdev->sco_cnt > hdev->sco_pkts)
+ hdev->sco_cnt = hdev->sco_pkts;
+ }
}
}
- queue_work(hdev->workqueue, &hdev->tx_work);
+ tasklet_schedule(&hdev->tx_task);
+
+ tasklet_enable(&hdev->tx_task);
+}
+
+static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
+ __le16 *ptr;
+ int i;
+
+ skb_pull(skb, sizeof(*ev));
+
+ BT_DBG("%s total_num_blocks %d num_hndl %d",
+ hdev->name, ev->total_num_blocks, ev->num_hndl);
+
+ if (skb->len < ev->num_hndl * 6) {
+ BT_DBG("%s bad parameters", hdev->name);
+ return;
+ }
+
+ tasklet_disable(&hdev->tx_task);
+
+ for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+ struct hci_conn *conn = NULL;
+ struct hci_chan *chan;
+ __u16 handle, block_count;
+
+ handle = get_unaligned_le16(ptr++);
+
+ /* Skip packet count */
+ ptr++;
+ block_count = get_unaligned_le16(ptr++);
+
+ BT_DBG("%s handle %d count %d", hdev->name, handle,
+ block_count);
+
+ if (hdev->dev_type == HCI_BREDR)
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ else {
+ chan = hci_chan_list_lookup_handle(hdev, handle);
+ if (chan)
+ conn = chan->conn;
+ }
+ if (conn) {
+ BT_DBG("%s conn %p sent %d", hdev->name,
+ conn, conn->sent);
+
+ conn->sent -= block_count;
+
+ if (conn->type == ACL_LINK) {
+ hdev->acl_cnt += block_count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ } else {
+ /* We should not find ourselves here */
+ BT_DBG("Unexpected event for SCO connection");
+ }
+ }
+ }
+
+ tasklet_schedule(&hdev->tx_task);
+
+ tasklet_enable(&hdev->tx_task);
}
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2605,14 +2559,17 @@
conn->mode = ev->mode;
conn->interval = __le16_to_cpu(ev->interval);
- if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+ if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
if (conn->mode == HCI_CM_ACTIVE)
- set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ conn->power_save = 1;
else
- clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ conn->power_save = 0;
}
+ if (conn->mode == HCI_CM_SNIFF)
+ if (wake_lock_active(&conn->idle_lock))
+ wake_unlock(&conn->idle_lock);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, ev->status);
}
@@ -2629,30 +2586,20 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
- goto unlock;
-
- if (conn->state == BT_CONNECTED) {
+ if (conn && conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
hci_conn_put(conn);
+ hci_conn_enter_active_mode(conn, 0);
}
- if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
+ if (!test_bit(HCI_PAIRABLE, &hdev->flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
- else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
- u8 secure;
- if (conn->pending_sec_level == BT_SECURITY_HIGH)
- secure = 1;
- else
- secure = 0;
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_request(hdev->id, &ev->bdaddr);
- mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
- }
-
-unlock:
hci_dev_unlock(hdev);
}
@@ -2665,7 +2612,7 @@
BT_DBG("%s", hdev->name);
- if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
+ if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
return;
hci_dev_lock(hdev);
@@ -2677,33 +2624,31 @@
goto not_found;
}
- BT_DBG("%s found key type %u for %s", hdev->name, key->type,
+ BT_DBG("%s found key type %u for %s", hdev->name, key->key_type,
batostr(&ev->bdaddr));
- if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
- key->type == HCI_LK_DEBUG_COMBINATION) {
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->key_type == 0x03) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+
if (conn) {
- if (key->type == HCI_LK_UNAUTH_COMBINATION &&
- conn->auth_type != 0xff &&
- (conn->auth_type & 0x01)) {
- BT_DBG("%s ignoring unauthenticated key", hdev->name);
- goto not_found;
- }
+ BT_DBG("Conn pending sec level is %d, ssp is %d, key len is %d",
+ conn->pending_sec_level, conn->ssp_mode, key->pin_len);
+ }
+ if (conn && (conn->ssp_mode == 0) &&
+ (conn->pending_sec_level == BT_SECURITY_HIGH) &&
+ (key->pin_len != 16)) {
+ BT_DBG("Security is high ignoring this key");
+ goto not_found;
+ }
- if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
- conn->pending_sec_level == BT_SECURITY_HIGH) {
- BT_DBG("%s ignoring key unauthenticated for high \
- security", hdev->name);
- goto not_found;
- }
-
- conn->key_type = key->type;
- conn->pin_length = key->pin_len;
+ if (key->key_type == 0x04 && conn && conn->auth_type != 0xff &&
+ (conn->auth_type & 0x01)) {
+ BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ goto not_found;
}
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -2726,7 +2671,7 @@
struct hci_conn *conn;
u8 pin_len = 0;
- BT_DBG("%s", hdev->name);
+ BT_DBG("%s type %d", hdev->name, ev->key_type);
hci_dev_lock(hdev);
@@ -2734,16 +2679,19 @@
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ memcpy(conn->link_key, ev->link_key, 16);
+ conn->key_type = ev->key_type;
+ hci_disconnect_amp(conn, 0x06);
+
+ conn->link_mode &= ~HCI_LM_ENCRYPT;
pin_len = conn->pin_length;
-
- if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
- conn->key_type = ev->key_type;
-
hci_conn_put(conn);
+ hci_conn_enter_active_mode(conn, 0);
}
- if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
- hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
+ if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+ hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
ev->key_type, pin_len);
hci_dev_unlock(hdev);
@@ -2810,7 +2758,6 @@
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
- bool name_known, ssp;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
@@ -2832,12 +2779,10 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
-
- name_known = hci_inquiry_cache_update(hdev, &data,
- false, &ssp);
- mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, info->rssi,
- !name_known, ssp, NULL, 0);
+ hci_inquiry_cache_update(hdev, &data);
+ mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
+ info->dev_class, info->rssi,
+ 0, NULL);
}
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -2851,11 +2796,10 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- name_known = hci_inquiry_cache_update(hdev, &data,
- false, &ssp);
- mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, info->rssi,
- !name_known, ssp, NULL, 0);
+ hci_inquiry_cache_update(hdev, &data);
+ mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
+ info->dev_class, info->rssi,
+ 0, NULL);
}
}
@@ -2880,25 +2824,32 @@
ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
+ ie->data.ssp_mode = (ev->features[0] & 0x01);
- if (ev->features[0] & LMP_HOST_SSP)
- set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+ conn->ssp_mode = (ev->features[0] & 0x01);
+ /*In case if remote device ssp supported/2.0 device
+ reduce the security level to MEDIUM if it is HIGH*/
+ if (!conn->ssp_mode && conn->auth_initiator &&
+ (conn->pending_sec_level == BT_SECURITY_HIGH))
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
+ if (conn->ssp_mode && conn->auth_initiator &&
+ conn->io_capability != 0x03) {
+ conn->pending_sec_level = BT_SECURITY_HIGH;
+ conn->auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+ }
}
if (conn->state != BT_CONFIG)
goto unlock;
- if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
+ if (!ev->status) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0,
- conn->dev_class);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -2940,13 +2891,14 @@
hci_conn_add_sysfs(conn);
break;
- case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
- conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
+ if (!conn->hdev->is_wbs)
+ conn->pkt_type =
+ (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
hci_setup_sync(conn, conn->link->handle);
goto unlock;
@@ -2992,8 +2944,6 @@
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
- bool name_known, ssp;
-
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -3002,19 +2952,10 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x01;
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- name_known = eir_has_data_type(info->data,
- sizeof(info->data),
- EIR_NAME_COMPLETE);
- else
- name_known = true;
-
- name_known = hci_inquiry_cache_update(hdev, &data, name_known,
- &ssp);
- mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, info->rssi, !name_known,
- ssp, info->data, sizeof(info->data));
+ hci_inquiry_cache_update(hdev, &data);
+ mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
+ info->dev_class, info->rssi,
+ HCI_MAX_EIR_LENGTH, info->data);
}
hci_dev_unlock(hdev);
@@ -3022,19 +2963,23 @@
static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
+ BT_DBG("%p", conn);
+
/* If remote requests dedicated bonding follow that lead */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
/* If both remote and local IO capabilities allow MITM
* protection then require it, otherwise don't */
- if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
+ if (conn->remote_cap == 0x03 || conn->io_capability == 0x03) {
return 0x02;
- else
+ } else {
+ conn->auth_type |= 0x01;
return 0x03;
+ }
}
/* If remote requests no-bonding follow that lead */
- if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
- return conn->remote_auth | (conn->auth_type & 0x01);
+ if (conn->remote_auth <= 0x01)
+ return 0x00;
return conn->auth_type;
}
@@ -3054,22 +2999,23 @@
hci_conn_hold(conn);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!test_bit(HCI_MGMT, &hdev->flags))
goto unlock;
- if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
+ if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
+ u8 io_cap = conn->io_capability;
+ /* ACL-SSP does not support IO CAP 0x04 */
+ cp.capability = (io_cap == 0x04) ? 0x01 : io_cap;
bacpy(&cp.bdaddr, &ev->bdaddr);
- /* Change the IO capability from KeyboardDisplay
- * to DisplayYesNo as it is not supported by BT spec. */
- cp.capability = (conn->io_capability == 0x04) ?
- 0x01 : conn->io_capability;
- conn->auth_type = hci_get_auth_req(conn);
- cp.authentication = conn->auth_type;
+ if (conn->auth_initiator)
+ cp.authentication = conn->auth_type;
+ else
+ cp.authentication = hci_get_auth_req(conn);
- if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
+ if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
hci_find_remote_oob_data(hdev, &conn->dst))
cp.oob_data = 0x01;
else
@@ -3081,7 +3027,7 @@
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
+ cp.reason = 0x16; /* Pairing not allowed */
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
sizeof(cp), &cp);
@@ -3105,94 +3051,31 @@
goto unlock;
conn->remote_cap = ev->capability;
+ conn->remote_oob = ev->oob_data;
conn->remote_auth = ev->authentication;
- if (ev->oob_data)
- set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static inline void hci_user_ssp_confirmation_evt(struct hci_dev *hdev,
+ u8 event, struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
- int loc_mitm, rem_mitm, confirm_hint = 0;
- struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
- goto unlock;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
- goto unlock;
-
- loc_mitm = (conn->auth_type & 0x01);
- rem_mitm = (conn->remote_auth & 0x01);
-
- /* If we require MITM but the remote device can't provide that
- * (it has NoInputNoOutput) then reject the confirmation
- * request. The only exception is when we're dedicated bonding
- * initiators (connect_cfm_cb set) since then we always have the MITM
- * bit set. */
- if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
- BT_DBG("Rejecting request: remote device can't provide MITM");
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
- goto unlock;
+ if (test_bit(HCI_MGMT, &hdev->flags)) {
+ if (event == HCI_EV_USER_PASSKEY_REQUEST)
+ mgmt_user_confirm_request(hdev->id, event,
+ &ev->bdaddr, 0);
+ else
+ mgmt_user_confirm_request(hdev->id, event,
+ &ev->bdaddr, ev->passkey);
}
- /* If no side requires MITM protection; auto-accept */
- if ((!loc_mitm || conn->remote_cap == 0x03) &&
- (!rem_mitm || conn->io_capability == 0x03)) {
-
- /* If we're not the initiators request authorization to
- * proceed from user space (mgmt_user_confirm with
- * confirm_hint set to 1). */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
- BT_DBG("Confirming auto-accept as acceptor");
- confirm_hint = 1;
- goto confirm;
- }
-
- BT_DBG("Auto-accept of user confirmation with %ums delay",
- hdev->auto_accept_delay);
-
- if (hdev->auto_accept_delay > 0) {
- int delay = msecs_to_jiffies(hdev->auto_accept_delay);
- mod_timer(&conn->auto_accept_timer, jiffies + delay);
- goto unlock;
- }
-
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
- goto unlock;
- }
-
-confirm:
- mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
- confirm_hint);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_user_passkey_req *ev = (void *) skb->data;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
-
hci_dev_unlock(hdev);
}
@@ -3214,9 +3097,8 @@
* initiated the authentication. A traditional auth_complete
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
- mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
- ev->status);
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
+ mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
hci_conn_put(conn);
@@ -3235,13 +3117,13 @@
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
+ ie->data.ssp_mode = (ev->features[0] & 0x01);
hci_dev_unlock(hdev);
}
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
struct oob_data *data;
@@ -3250,7 +3132,7 @@
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!test_bit(HCI_MGMT, &hdev->flags))
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
@@ -3286,33 +3168,28 @@
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
+ conn = hci_le_conn_add(hdev, &ev->bdaddr, ev->bdaddr_type);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
return;
}
-
- conn->dst_type = ev->bdaddr_type;
}
if (ev->status) {
- mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
- conn->dst_type, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
- if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
- conn->dst_type, 0, NULL, 0, NULL);
-
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr, 1);
+ hci_conn_hold(conn);
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
@@ -3322,30 +3199,6 @@
hci_dev_unlock(hdev);
}
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- u8 num_reports = skb->data[0];
- void *ptr = &skb->data[1];
- s8 rssi;
-
- hci_dev_lock(hdev);
-
- while (num_reports--) {
- struct hci_ev_le_advertising_info *ev = ptr;
-
- hci_add_adv_entry(hdev, ev);
-
- rssi = ev->data[ev->length];
- mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
- NULL, rssi, 0, 1, ev->data, ev->length);
-
- ptr += sizeof(*ev) + ev->length + 1;
- }
-
- hci_dev_unlock(hdev);
-}
-
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -3353,7 +3206,7 @@
struct hci_cp_le_ltk_reply cp;
struct hci_cp_le_ltk_neg_reply neg;
struct hci_conn *conn;
- struct smp_ltk *ltk;
+ struct link_key *ltk;
BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
@@ -3369,17 +3222,10 @@
memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
cp.handle = cpu_to_le16(conn->handle);
-
- if (ltk->authenticated)
- conn->sec_level = BT_SECURITY_HIGH;
+ conn->pin_length = ltk->pin_len;
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
- if (ltk->type & HCI_SMP_STK) {
- list_del(<k->list);
- kfree(ltk);
- }
-
hci_dev_unlock(hdev);
return;
@@ -3390,6 +3236,27 @@
hci_dev_unlock(hdev);
}
+static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_advertising_info *ev;
+ u8 num_reports;
+
+ num_reports = skb->data[0];
+ ev = (void *) &skb->data[1];
+
+ hci_dev_lock(hdev);
+
+ while (num_reports--) {
+ mgmt_device_found(hdev->id, &ev->bdaddr, ev->bdaddr_type,
+ 1, NULL, 0, ev->length, ev->data);
+ hci_add_adv_entry(hdev, ev);
+ ev = (void *) (ev->data + ev->length + 1);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -3401,24 +3268,146 @@
hci_le_conn_complete_evt(hdev, skb);
break;
- case HCI_EV_LE_ADVERTISING_REPORT:
- hci_le_adv_report_evt(hdev, skb);
- break;
-
case HCI_EV_LE_LTK_REQ:
hci_le_ltk_request_evt(hdev, skb);
break;
+ case HCI_EV_LE_ADVERTISING_REPORT:
+ hci_le_adv_report_evt(hdev, skb);
+ break;
+
default:
break;
}
}
+static inline void hci_phy_link_complete(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_phys_link_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s handle %d status %d", hdev->name, ev->phy_handle,
+ ev->status);
+
+ hci_dev_lock(hdev);
+
+ if (ev->status == 0) {
+ conn = hci_conn_add(hdev, ACL_LINK, 0, BDADDR_ANY);
+ if (conn) {
+ conn->handle = ev->phy_handle;
+ conn->state = BT_CONNECTED;
+
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT/2;
+ hci_conn_put(conn);
+
+ hci_conn_hold_device(conn);
+ hci_conn_add_sysfs(conn);
+ } else
+ BT_ERR("No memory for new connection");
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_log_link_complete(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_log_link_complete *ev = (void *) skb->data;
+ struct hci_chan *chan;
+
+ BT_DBG("%s handle %d status %d", hdev->name,
+ __le16_to_cpu(ev->log_handle), ev->status);
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_id(hdev, ev->phy_handle);
+
+ if (chan) {
+ if (ev->status == 0) {
+ chan->ll_handle = __le16_to_cpu(ev->log_handle);
+ chan->state = BT_CONNECTED;
+ } else {
+ chan->state = BT_CLOSED;
+ }
+
+ hci_proto_create_cfm(chan, ev->status);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_flow_spec_modify_complete(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_flow_spec_modify_complete *ev = (void *) skb->data;
+ struct hci_chan *chan;
+
+ BT_DBG("%s handle %d status %d", hdev->name,
+ __le16_to_cpu(ev->log_handle), ev->status);
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, ev->log_handle);
+ if (chan)
+ hci_proto_modify_cfm(chan, ev->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_disconn_log_link_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_log_link_complete *ev = (void *) skb->data;
+ struct hci_chan *chan;
+
+ BT_DBG("%s handle %d status %d", hdev->name,
+ __le16_to_cpu(ev->log_handle), ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, __le16_to_cpu(ev->log_handle));
+ if (chan)
+ hci_proto_destroy_cfm(chan, ev->reason);
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_disconn_phy_link_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_phys_link_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status %d", hdev->name, ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (conn) {
+ conn->state = BT_CLOSED;
+
+ hci_proto_disconn_cfm(conn, ev->reason, 0);
+ hci_conn_del(conn);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
__u8 event = hdr->evt;
+ BT_DBG("");
+
skb_pull(skb, HCI_EVENT_HDR_SIZE);
switch (event) {
@@ -3546,12 +3535,10 @@
hci_io_capa_reply_evt(hdev, skb);
break;
- case HCI_EV_USER_CONFIRM_REQUEST:
- hci_user_confirm_request_evt(hdev, skb);
- break;
-
case HCI_EV_USER_PASSKEY_REQUEST:
- hci_user_passkey_request_evt(hdev, skb);
+ case HCI_EV_USER_PASSKEY_NOTIFICATION:
+ case HCI_EV_USER_CONFIRM_REQUEST:
+ hci_user_ssp_confirmation_evt(hdev, event, skb);
break;
case HCI_EV_SIMPLE_PAIR_COMPLETE:
@@ -3570,10 +3557,40 @@
hci_remote_oob_data_request_evt(hdev, skb);
break;
+ case HCI_EV_PHYS_LINK_COMPLETE:
+ hci_phy_link_complete(hdev, skb);
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
+ case HCI_EV_LOG_LINK_COMPLETE:
+ hci_log_link_complete(hdev, skb);
+ break;
+
+ case HCI_EV_FLOW_SPEC_MODIFY_COMPLETE:
+ hci_flow_spec_modify_complete(hdev, skb);
+ break;
+
+ case HCI_EV_DISCONN_LOG_LINK_COMPLETE:
+ hci_disconn_log_link_complete_evt(hdev, skb);
+ break;
+
+ case HCI_EV_DISCONN_PHYS_LINK_COMPLETE:
+ hci_disconn_phy_link_complete_evt(hdev, skb);
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
case HCI_EV_NUM_COMP_BLOCKS:
hci_num_comp_blocks_evt(hdev, skb);
break;
+ case HCI_EV_CHANNEL_SELECTED:
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
+ case HCI_EV_AMP_STATUS_CHANGE:
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
default:
BT_DBG("%s event 0x%x", hdev->name, event);
break;
@@ -3582,3 +3599,31 @@
kfree_skb(skb);
hdev->stat.evt_rx++;
}
+
+/* Generate internal stack event */
+void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
+{
+ struct hci_event_hdr *hdr;
+ struct hci_ev_stack_internal *ev;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
+ hdr->evt = HCI_EV_STACK_INTERNAL;
+ hdr->plen = sizeof(*ev) + dlen;
+
+ ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
+ ev->type = type;
+ memcpy(ev->data, data, dlen);
+
+ bt_cb(skb)->incoming = 1;
+ __net_timestamp(skb);
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ skb->dev = (void *) hdev;
+ hci_send_to_sock(hdev, skb, NULL);
+ kfree_skb(skb);
+}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623..61c2ca1 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2000-2001, 2011, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -42,14 +42,14 @@
#include <linux/ioctl.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/hci_mon.h>
-static atomic_t monitor_promisc = ATOMIC_INIT(0);
+static bool enable_mgmt = 1;
/* ----- HCI socket interface ----- */
@@ -85,20 +85,22 @@
};
/* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sock *skip_sk)
{
struct sock *sk;
struct hlist_node *node;
- struct sk_buff *skb_copy = NULL;
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
-
sk_for_each(sk, node, &hci_sk_list.head) {
struct hci_filter *flt;
struct sk_buff *nskb;
+ if (sk == skip_sk)
+ continue;
+
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
@@ -106,9 +108,12 @@
if (skb->sk == sk)
continue;
- if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
+ if (bt_cb(skb)->channel != hci_pi(sk)->channel)
continue;
+ if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
+ goto clone;
+
/* Apply filter */
flt = &hci_pi(sk)->filter;
@@ -132,301 +137,19 @@
continue;
}
- if (!skb_copy) {
- /* Create a private copy with headroom */
- skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
- if (!skb_copy)
- continue;
-
- /* Put type byte before the data */
- memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
- }
-
- nskb = skb_clone(skb_copy, GFP_ATOMIC);
- if (!nskb)
- continue;
-
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
-
- read_unlock(&hci_sk_list.lock);
-
- kfree_skb(skb_copy);
-}
-
-/* Send frame to control socket */
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
-{
- struct sock *sk;
- struct hlist_node *node;
-
- BT_DBG("len %d", skb->len);
-
- read_lock(&hci_sk_list.lock);
-
- sk_for_each(sk, node, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
- /* Skip the original socket */
- if (sk == skip_sk)
- continue;
-
- if (sk->sk_state != BT_BOUND)
- continue;
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
- continue;
-
+clone:
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
continue;
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
-
- read_unlock(&hci_sk_list.lock);
-}
-
-/* Send frame to monitor socket */
-void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct sock *sk;
- struct hlist_node *node;
- struct sk_buff *skb_copy = NULL;
- __le16 opcode;
-
- if (!atomic_read(&monitor_promisc))
- return;
-
- BT_DBG("hdev %p len %d", hdev, skb->len);
-
- switch (bt_cb(skb)->pkt_type) {
- case HCI_COMMAND_PKT:
- opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
- break;
- case HCI_EVENT_PKT:
- opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
- break;
- case HCI_ACLDATA_PKT:
- if (bt_cb(skb)->incoming)
- opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
- else
- opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
- break;
- case HCI_SCODATA_PKT:
- if (bt_cb(skb)->incoming)
- opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
- else
- opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
- break;
- default:
- return;
- }
-
- read_lock(&hci_sk_list.lock);
-
- sk_for_each(sk, node, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
- if (sk->sk_state != BT_BOUND)
- continue;
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
- continue;
-
- if (!skb_copy) {
- struct hci_mon_hdr *hdr;
-
- /* Create a private copy with headroom */
- skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
- if (!skb_copy)
- continue;
-
- /* Put header before the data */
- hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
- hdr->opcode = opcode;
- hdr->index = cpu_to_le16(hdev->id);
- hdr->len = cpu_to_le16(skb->len);
- }
-
- nskb = skb_clone(skb_copy, GFP_ATOMIC);
- if (!nskb)
- continue;
+ /* Put type byte before the data */
+ if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
+ memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
-
read_unlock(&hci_sk_list.lock);
-
- kfree_skb(skb_copy);
-}
-
-static void send_monitor_event(struct sk_buff *skb)
-{
- struct sock *sk;
- struct hlist_node *node;
-
- BT_DBG("len %d", skb->len);
-
- read_lock(&hci_sk_list.lock);
-
- sk_for_each(sk, node, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
- if (sk->sk_state != BT_BOUND)
- continue;
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
- continue;
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- continue;
-
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
-
- read_unlock(&hci_sk_list.lock);
-}
-
-static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
-{
- struct hci_mon_hdr *hdr;
- struct hci_mon_new_index *ni;
- struct sk_buff *skb;
- __le16 opcode;
-
- switch (event) {
- case HCI_DEV_REG:
- skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
- ni->type = hdev->dev_type;
- ni->bus = hdev->bus;
- bacpy(&ni->bdaddr, &hdev->bdaddr);
- memcpy(ni->name, hdev->name, 8);
-
- opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
- break;
-
- case HCI_DEV_UNREG:
- skb = bt_skb_alloc(0, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
- break;
-
- default:
- return NULL;
- }
-
- __net_timestamp(skb);
-
- hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
- hdr->opcode = opcode;
- hdr->index = cpu_to_le16(hdev->id);
- hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
-
- return skb;
-}
-
-static void send_monitor_replay(struct sock *sk)
-{
- struct hci_dev *hdev;
-
- read_lock(&hci_dev_list_lock);
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- struct sk_buff *skb;
-
- skb = create_monitor_event(hdev, HCI_DEV_REG);
- if (!skb)
- continue;
-
- if (sock_queue_rcv_skb(sk, skb))
- kfree_skb(skb);
- }
-
- read_unlock(&hci_dev_list_lock);
-}
-
-/* Generate internal stack event */
-static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
-{
- struct hci_event_hdr *hdr;
- struct hci_ev_stack_internal *ev;
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
- if (!skb)
- return;
-
- hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
- hdr->evt = HCI_EV_STACK_INTERNAL;
- hdr->plen = sizeof(*ev) + dlen;
-
- ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
- ev->type = type;
- memcpy(ev->data, data, dlen);
-
- bt_cb(skb)->incoming = 1;
- __net_timestamp(skb);
-
- bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
- skb->dev = (void *) hdev;
- hci_send_to_sock(hdev, skb);
- kfree_skb(skb);
-}
-
-void hci_sock_dev_event(struct hci_dev *hdev, int event)
-{
- struct hci_ev_si_device ev;
-
- BT_DBG("hdev %s event %d", hdev->name, event);
-
- /* Send event to monitor */
- if (atomic_read(&monitor_promisc)) {
- struct sk_buff *skb;
-
- skb = create_monitor_event(hdev, event);
- if (skb) {
- send_monitor_event(skb);
- kfree_skb(skb);
- }
- }
-
- /* Send event to sockets */
- ev.event = event;
- ev.dev_id = hdev->id;
- hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
-
- if (event == HCI_DEV_UNREG) {
- struct sock *sk;
- struct hlist_node *node;
-
- /* Detach sockets from device */
- read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
- bh_lock_sock_nested(sk);
- if (hci_pi(sk)->hdev == hdev) {
- hci_pi(sk)->hdev = NULL;
- sk->sk_err = EPIPE;
- sk->sk_state = BT_OPEN;
- sk->sk_state_change(sk);
-
- hci_dev_put(hdev);
- }
- bh_unlock_sock(sk);
- }
- read_unlock(&hci_sk_list.lock);
- }
}
static int hci_sock_release(struct socket *sock)
@@ -441,9 +164,6 @@
hdev = hci_pi(sk)->hdev;
- if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
- atomic_dec(&monitor_promisc);
-
bt_sock_unlink(&hci_sk_list, sk);
if (hdev) {
@@ -460,38 +180,82 @@
return 0;
}
-static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- bdaddr_t bdaddr;
- int err;
+ struct list_head *p;
- if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
- return -EFAULT;
+ list_for_each(p, &hdev->blacklist) {
+ struct bdaddr_list *b;
- hci_dev_lock(hdev);
+ b = list_entry(p, struct bdaddr_list, list);
- err = hci_blacklist_add(hdev, &bdaddr, 0);
+ if (bacmp(bdaddr, &b->bdaddr) == 0)
+ return b;
+ }
- hci_dev_unlock(hdev);
-
- return err;
+ return NULL;
}
-static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
- int err;
+ struct bdaddr_list *entry;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- hci_dev_lock(hdev);
+ if (bacmp(&bdaddr, BDADDR_ANY) == 0)
+ return -EBADF;
- err = hci_blacklist_del(hdev, &bdaddr, 0);
+ if (hci_blacklist_lookup(hdev, &bdaddr))
+ return -EEXIST;
- hci_dev_unlock(hdev);
+ entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
- return err;
+ bacpy(&entry->bdaddr, &bdaddr);
+
+ list_add(&entry->list, &hdev->blacklist);
+
+ return 0;
+}
+
+int hci_blacklist_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->blacklist) {
+ struct bdaddr_list *b;
+
+ b = list_entry(p, struct bdaddr_list, list);
+
+ list_del(p);
+ kfree(b);
+ }
+
+ return 0;
+}
+
+static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
+{
+ bdaddr_t bdaddr;
+ struct bdaddr_list *entry;
+
+ if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
+ return -EFAULT;
+
+ if (bacmp(&bdaddr, BDADDR_ANY) == 0)
+ return hci_blacklist_clear(hdev);
+
+ entry = hci_blacklist_lookup(hdev, &bdaddr);
+ if (!entry)
+ return -ENOENT;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return 0;
}
/* Ioctls that require bound socket */
@@ -526,12 +290,15 @@
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
- return hci_sock_blacklist_add(hdev, (void __user *) arg);
+ return hci_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
- return hci_sock_blacklist_del(hdev, (void __user *) arg);
+ return hci_blacklist_del(hdev, (void __user *) arg);
+
+ case HCISETAUTHINFO:
+ return hci_set_auth_info(hdev, (void __user *) arg);
default:
if (hdev->ioctl)
@@ -561,7 +328,12 @@
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
- return hci_dev_open(arg);
+
+ err = hci_dev_open(arg);
+ if (!err || err == -EALREADY)
+ return 0;
+ else
+ return err;
case HCIDEVDOWN:
if (!capable(CAP_NET_ADMIN))
@@ -620,69 +392,31 @@
if (haddr.hci_family != AF_BLUETOOTH)
return -EINVAL;
+ if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
+ return -EINVAL;
+
+ if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
+ return -EINVAL;
+
lock_sock(sk);
- if (sk->sk_state == BT_BOUND) {
+ if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
err = -EALREADY;
goto done;
}
- switch (haddr.hci_channel) {
- case HCI_CHANNEL_RAW:
- if (hci_pi(sk)->hdev) {
- err = -EALREADY;
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ hdev = hci_dev_get(haddr.hci_dev);
+ if (!hdev) {
+ err = -ENODEV;
goto done;
}
- if (haddr.hci_dev != HCI_DEV_NONE) {
- hdev = hci_dev_get(haddr.hci_dev);
- if (!hdev) {
- err = -ENODEV;
- goto done;
- }
-
- atomic_inc(&hdev->promisc);
- }
-
- hci_pi(sk)->hdev = hdev;
- break;
-
- case HCI_CHANNEL_CONTROL:
- if (haddr.hci_dev != HCI_DEV_NONE) {
- err = -EINVAL;
- goto done;
- }
-
- if (!capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- goto done;
- }
-
- break;
-
- case HCI_CHANNEL_MONITOR:
- if (haddr.hci_dev != HCI_DEV_NONE) {
- err = -EINVAL;
- goto done;
- }
-
- if (!capable(CAP_NET_RAW)) {
- err = -EPERM;
- goto done;
- }
-
- send_monitor_replay(sk);
-
- atomic_inc(&monitor_promisc);
- break;
-
- default:
- err = -EINVAL;
- goto done;
+ atomic_inc(&hdev->promisc);
}
-
hci_pi(sk)->channel = haddr.hci_channel;
+ hci_pi(sk)->hdev = hdev;
sk->sk_state = BT_BOUND;
done:
@@ -733,8 +467,7 @@
data = &tv;
len = sizeof(tv);
#ifdef CONFIG_COMPAT
- if (!COMPAT_USE_64BIT_TIME &&
- (msg->msg_flags & MSG_CMSG_COMPAT)) {
+ if (msg->msg_flags & MSG_CMSG_COMPAT) {
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
data = &ctv;
@@ -777,15 +510,7 @@
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- switch (hci_pi(sk)->channel) {
- case HCI_CHANNEL_RAW:
- hci_sock_cmsg(sk, msg, skb);
- break;
- case HCI_CHANNEL_CONTROL:
- case HCI_CHANNEL_MONITOR:
- sock_recv_timestamp(msg, sk, skb);
- break;
- }
+ hci_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
@@ -798,6 +523,7 @@
struct sock *sk = sock->sk;
struct hci_dev *hdev;
struct sk_buff *skb;
+ int reserve = 0;
int err;
BT_DBG("sock %p sk %p", sock, sk);
@@ -819,9 +545,6 @@
case HCI_CHANNEL_CONTROL:
err = mgmt_control(sk, msg, len);
goto done;
- case HCI_CHANNEL_MONITOR:
- err = -EOPNOTSUPP;
- goto done;
default:
err = -EINVAL;
goto done;
@@ -838,10 +561,18 @@
goto done;
}
- skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
+ /* Allocate extra headroom for Qualcomm PAL */
+ if (hdev->dev_type == HCI_AMP && hdev->manufacturer == 0x001d)
+ reserve = BT_SKB_RESERVE_80211;
+
+ skb = bt_skb_send_alloc(sk, len + reserve,
+ msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
goto done;
+ if (reserve)
+ skb_reserve(skb, reserve);
+
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto drop;
@@ -865,10 +596,10 @@
if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
skb_queue_tail(&hdev->raw_q, skb);
- queue_work(hdev->workqueue, &hdev->tx_work);
+ tasklet_schedule(&hdev->tx_task);
} else {
skb_queue_tail(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
+ tasklet_schedule(&hdev->cmd_task);
}
} else {
if (!capable(CAP_NET_RAW)) {
@@ -877,7 +608,7 @@
}
skb_queue_tail(&hdev->raw_q, skb);
- queue_work(hdev->workqueue, &hdev->tx_work);
+ tasklet_schedule(&hdev->tx_task);
}
err = len;
@@ -901,11 +632,6 @@
lock_sock(sk);
- if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
- err = -EINVAL;
- goto done;
- }
-
switch (optname) {
case HCI_DATA_DIR:
if (get_user(opt, (int __user *)optval)) {
@@ -968,7 +694,6 @@
break;
}
-done:
release_sock(sk);
return err;
}
@@ -977,20 +702,11 @@
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
- int len, opt, err = 0;
-
- BT_DBG("sk %p, opt %d", sk, optname);
+ int len, opt;
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
- err = -EINVAL;
- goto done;
- }
-
switch (optname) {
case HCI_DATA_DIR:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -999,7 +715,7 @@
opt = 0;
if (put_user(opt, optval))
- err = -EFAULT;
+ return -EFAULT;
break;
case HCI_TIME_STAMP:
@@ -1009,7 +725,7 @@
opt = 0;
if (put_user(opt, optval))
- err = -EFAULT;
+ return -EFAULT;
break;
case HCI_FILTER:
@@ -1024,17 +740,15 @@
len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
- err = -EFAULT;
+ return -EFAULT;
break;
default:
- err = -ENOPROTOOPT;
+ return -ENOPROTOOPT;
break;
}
-done:
- release_sock(sk);
- return err;
+ return 0;
}
static const struct proto_ops hci_sock_ops = {
@@ -1092,12 +806,54 @@
return 0;
}
+static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct hci_dev *hdev = (struct hci_dev *) ptr;
+ struct hci_ev_si_device ev;
+
+ BT_DBG("hdev %s event %ld", hdev->name, event);
+
+ /* Send event to sockets */
+ ev.event = event;
+ ev.dev_id = hdev->id;
+ hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+
+ if (event == HCI_DEV_UNREG) {
+ struct sock *sk;
+ struct hlist_node *node;
+
+ /* Detach sockets from device */
+ read_lock(&hci_sk_list.lock);
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ local_bh_disable();
+ bh_lock_sock_nested(sk);
+ if (hci_pi(sk)->hdev == hdev) {
+ hci_pi(sk)->hdev = NULL;
+ sk->sk_err = EPIPE;
+ sk->sk_state = BT_OPEN;
+ sk->sk_state_change(sk);
+
+ hci_dev_put(hdev);
+ }
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ }
+ read_unlock(&hci_sk_list.lock);
+ }
+
+ return NOTIFY_DONE;
+}
+
static const struct net_proto_family hci_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = hci_sock_create,
};
+static struct notifier_block hci_sock_nblock = {
+ .notifier_call = hci_sock_dev_event
+};
+
int __init hci_sock_init(void)
{
int err;
@@ -1110,6 +866,8 @@
if (err < 0)
goto error;
+ hci_register_notifier(&hci_sock_nblock);
+
BT_INFO("HCI socket layer initialized");
return 0;
@@ -1125,5 +883,10 @@
if (bt_sock_unregister(BTPROTO_HCI) < 0)
BT_ERR("HCI socket unregistration failed");
+ hci_unregister_notifier(&hci_sock_nblock);
+
proto_unregister(&hci_sk_proto);
}
+
+module_param(enable_mgmt, bool, 0644);
+MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index bc15429..0a4c7d6 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
@@ -24,8 +25,6 @@
return "SCO";
case ESCO_LINK:
return "eSCO";
- case LE_LINK:
- return "LE";
default:
return "UNKNOWN";
}
@@ -33,19 +32,19 @@
static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = to_hci_conn(dev);
+ struct hci_conn *conn = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = to_hci_conn(dev);
+ struct hci_conn *conn = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", batostr(&conn->dst));
}
static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = to_hci_conn(dev);
+ struct hci_conn *conn = dev_get_drvdata(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
conn->features[0], conn->features[1],
@@ -79,8 +78,8 @@
static void bt_link_release(struct device *dev)
{
- struct hci_conn *conn = to_hci_conn(dev);
- kfree(conn);
+ void *data = dev_get_drvdata(dev);
+ kfree(data);
}
static struct device_type bt_link = {
@@ -89,6 +88,23 @@
.release = bt_link_release,
};
+static void add_conn(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
+ struct hci_dev *hdev = conn->hdev;
+
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+ dev_set_drvdata(&conn->dev, conn);
+
+ if (device_add(&conn->dev) < 0) {
+ BT_ERR("Failed to register connection device");
+ return;
+ }
+
+ hci_dev_hold(hdev);
+}
+
/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
@@ -99,37 +115,9 @@
return !strncmp(dev_name(dev), "rfcomm", 6);
}
-void hci_conn_init_sysfs(struct hci_conn *conn)
+static void del_conn(struct work_struct *work)
{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- conn->dev.type = &bt_link;
- conn->dev.class = bt_class;
- conn->dev.parent = &hdev->dev;
-
- device_initialize(&conn->dev);
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
- if (device_add(&conn->dev) < 0) {
- BT_ERR("Failed to register connection device");
- return;
- }
-
- hci_dev_hold(hdev);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
+ struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
struct hci_dev *hdev = conn->hdev;
if (!device_is_registered(&conn->dev))
@@ -151,6 +139,36 @@
hci_dev_put(hdev);
}
+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = bt_class;
+ conn->dev.parent = &hdev->dev;
+
+ device_initialize(&conn->dev);
+
+ INIT_WORK(&conn->work_add, add_conn);
+ INIT_WORK(&conn->work_del, del_conn);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+ BT_DBG("conn %p", conn);
+
+ queue_work(conn->hdev->workqueue, &conn->work_add);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+ BT_DBG("conn %p", conn);
+
+ queue_work(conn->hdev->workqueue, &conn->work_del);
+}
+
static inline char *host_bustostr(int bus)
{
switch (bus) {
@@ -187,19 +205,19 @@
static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
int i;
@@ -212,20 +230,20 @@
static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%.2x%.2x%.2x\n",
hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
}
static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
hdev->features[0], hdev->features[1],
@@ -236,31 +254,31 @@
static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
unsigned int val;
int rv;
@@ -278,13 +296,13 @@
static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
u16 val;
int rv;
@@ -302,13 +320,13 @@
static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct hci_dev *hdev = to_hci_dev(dev);
+ struct hci_dev *hdev = dev_get_drvdata(dev);
u16 val;
int rv;
@@ -368,9 +386,8 @@
static void bt_host_release(struct device *dev)
{
- struct hci_dev *hdev = to_hci_dev(dev);
- kfree(hdev);
- module_put(THIS_MODULE);
+ void *data = dev_get_drvdata(dev);
+ kfree(data);
}
static struct device_type bt_host = {
@@ -382,12 +399,12 @@
static int inquiry_cache_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
- list_for_each_entry(e, &cache->all, all) {
+ for (e = cache->list; e; e = e->next) {
struct inquiry_data *data = &e->data;
seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
batostr(&data->bdaddr),
@@ -398,7 +415,7 @@
data->rssi, data->ssp_mode, e->timestamp);
}
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
return 0;
}
@@ -418,14 +435,19 @@
static int blacklist_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct bdaddr_list *b;
+ struct list_head *l;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
- list_for_each_entry(b, &hdev->blacklist, list)
+ list_for_each(l, &hdev->blacklist) {
+ struct bdaddr_list *b;
+
+ b = list_entry(l, struct bdaddr_list, list);
+
seq_printf(f, "%s\n", batostr(&b->bdaddr));
+ }
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
return 0;
}
@@ -462,14 +484,19 @@
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct bt_uuid *uuid;
+ struct list_head *l;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
- list_for_each_entry(uuid, &hdev->uuids, list)
+ list_for_each(l, &hdev->uuids) {
+ struct bt_uuid *uuid;
+
+ uuid = list_entry(l, struct bt_uuid, list);
+
print_bt_uuid(f, uuid->uuid);
+ }
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
return 0;
}
@@ -486,57 +513,22 @@
.release = single_release,
};
-static int auto_accept_delay_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
-
- hdev->auto_accept_delay = val;
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
-
- *val = hdev->auto_accept_delay;
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
- auto_accept_delay_set, "%llu\n");
-
-void hci_init_sysfs(struct hci_dev *hdev)
-{
- struct device *dev = &hdev->dev;
-
- dev->type = &bt_host;
- dev->class = bt_class;
-
- __module_get(THIS_MODULE);
- device_initialize(dev);
-}
-
-int hci_add_sysfs(struct hci_dev *hdev)
+int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ dev->type = &bt_host;
+ dev->class = bt_class;
dev->parent = hdev->parent;
+
dev_set_name(dev, "%s", hdev->name);
- err = device_add(dev);
+ dev_set_drvdata(dev, hdev);
+
+ err = device_register(dev);
if (err < 0)
return err;
@@ -555,12 +547,10 @@
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
- debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
- &auto_accept_delay_fops);
return 0;
}
-void hci_del_sysfs(struct hci_dev *hdev)
+void hci_unregister_sysfs(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index d478be1..a91c97c 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,6 +21,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -36,8 +37,6 @@
#include <linux/file.h>
#include <linux/init.h>
#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/input.h>
@@ -81,20 +80,24 @@
static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
{
struct hidp_session *session;
+ struct list_head *p;
BT_DBG("");
- list_for_each_entry(session, &hidp_session_list, list) {
+ list_for_each(p, &hidp_session_list) {
+ session = list_entry(p, struct hidp_session, list);
if (!bacmp(bdaddr, &session->bdaddr))
return session;
}
-
return NULL;
}
static void __hidp_link_session(struct hidp_session *session)
{
+ __module_get(THIS_MODULE);
list_add(&session->list, &hidp_session_list);
+
+ hci_conn_hold_device(session->conn);
}
static void __hidp_unlink_session(struct hidp_session *session)
@@ -102,6 +105,7 @@
hci_conn_put_device(session->conn);
list_del(&session->list);
+ module_put(THIS_MODULE);
}
static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -250,9 +254,6 @@
BT_DBG("session %p data %p size %d", session, data, size);
- if (atomic_read(&session->terminate))
- return -EIO;
-
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
@@ -318,143 +319,24 @@
return hidp_queue_report(session, buf, rsize);
}
-static int hidp_get_raw_report(struct hid_device *hid,
- unsigned char report_number,
- unsigned char *data, size_t count,
- unsigned char report_type)
-{
- struct hidp_session *session = hid->driver_data;
- struct sk_buff *skb;
- size_t len;
- int numbered_reports = hid->report_enum[report_type].numbered;
- int ret;
-
- switch (report_type) {
- case HID_FEATURE_REPORT:
- report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
- break;
- case HID_INPUT_REPORT:
- report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT;
- break;
- case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT;
- break;
- default:
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&session->report_mutex))
- return -ERESTARTSYS;
-
- /* Set up our wait, and send the report request to the device. */
- session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK;
- session->waiting_report_number = numbered_reports ? report_number : -1;
- set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- data[0] = report_number;
- ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
- if (ret)
- goto err;
-
- /* Wait for the return of the report. The returned report
- gets put in session->report_return. */
- while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
- int res;
-
- res = wait_event_interruptible_timeout(session->report_queue,
- !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags),
- 5*HZ);
- if (res == 0) {
- /* timeout */
- ret = -EIO;
- goto err;
- }
- if (res < 0) {
- /* signal */
- ret = -ERESTARTSYS;
- goto err;
- }
- }
-
- skb = session->report_return;
- if (skb) {
- len = skb->len < count ? skb->len : count;
- memcpy(data, skb->data, len);
-
- kfree_skb(skb);
- session->report_return = NULL;
- } else {
- /* Device returned a HANDSHAKE, indicating protocol error. */
- len = -EIO;
- }
-
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
-
- return len;
-
-err:
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
- return ret;
-}
-
static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
unsigned char report_type)
{
- struct hidp_session *session = hid->driver_data;
- int ret;
-
switch (report_type) {
case HID_FEATURE_REPORT:
report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
break;
case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
+ report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
break;
default:
return -EINVAL;
}
- if (mutex_lock_interruptible(&session->report_mutex))
- return -ERESTARTSYS;
-
- /* Set up our wait, and send the report request to the device. */
- set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
- count);
- if (ret)
- goto err;
-
- /* Wait for the ACK from the device. */
- while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
- int res;
-
- res = wait_event_interruptible_timeout(session->report_queue,
- !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags),
- 10*HZ);
- if (res == 0) {
- /* timeout */
- ret = -EIO;
- goto err;
- }
- if (res < 0) {
- /* signal */
- ret = -ERESTARTSYS;
- goto err;
- }
- }
-
- if (!session->output_report_success) {
- ret = -EIO;
- goto err;
- }
-
- ret = count;
-
-err:
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- mutex_unlock(&session->report_mutex);
- return ret;
+ if (hidp_send_ctrl_message(hid->driver_data, report_type,
+ data, count))
+ return -ENOMEM;
+ return count;
}
static void hidp_idle_timeout(unsigned long arg)
@@ -462,7 +344,7 @@
struct hidp_session *session = (struct hidp_session *) arg;
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ hidp_schedule(session);
}
static void hidp_set_timer(struct hidp_session *session)
@@ -481,21 +363,16 @@
unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
- session->output_report_success = 0; /* default condition */
switch (param) {
case HIDP_HSHK_SUCCESSFUL:
/* FIXME: Call into SET_ GET_ handlers here */
- session->output_report_success = 1;
break;
case HIDP_HSHK_NOT_READY:
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
- if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
- wake_up_interruptible(&session->report_queue);
-
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -514,10 +391,6 @@
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
break;
}
-
- /* Wake up the waiting thread. */
- if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
- wake_up_interruptible(&session->report_queue);
}
static void hidp_process_hid_control(struct hidp_session *session,
@@ -530,16 +403,15 @@
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
+ /* Kill session thread */
atomic_inc(&session->terminate);
- wake_up_process(current);
+ hidp_schedule(session);
}
}
-/* Returns true if the passed-in skb should be freed by the caller. */
-static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
+static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
- int done_with_skb = 1;
BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
switch (param) {
@@ -551,6 +423,7 @@
if (session->hid)
hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -562,27 +435,12 @@
__hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
}
-
- if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
- param == session->waiting_report_type) {
- if (session->waiting_report_number < 0 ||
- session->waiting_report_number == skb->data[0]) {
- /* hidp_get_raw_report() is waiting on this report. */
- session->report_return = skb;
- done_with_skb = 0;
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- wake_up_interruptible(&session->report_queue);
- }
- }
-
- return done_with_skb;
}
static void hidp_recv_ctrl_frame(struct hidp_session *session,
struct sk_buff *skb)
{
unsigned char hdr, type, param;
- int free_skb = 1;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -602,7 +460,7 @@
break;
case HIDP_TRANS_DATA:
- free_skb = hidp_process_data(session, skb, param);
+ hidp_process_data(session, skb, param);
break;
default:
@@ -611,8 +469,7 @@
break;
}
- if (free_skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void hidp_recv_intr_frame(struct hidp_session *session,
@@ -657,24 +514,7 @@
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
-static void hidp_process_intr_transmit(struct hidp_session *session)
-{
- struct sk_buff *skb;
-
- BT_DBG("session %p", session);
-
- while ((skb = skb_dequeue(&session->intr_transmit))) {
- if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->intr_transmit, skb);
- break;
- }
-
- hidp_set_timer(session);
- kfree_skb(skb);
- }
-}
-
-static void hidp_process_ctrl_transmit(struct hidp_session *session)
+static void hidp_process_transmit(struct hidp_session *session)
{
struct sk_buff *skb;
@@ -689,6 +529,16 @@
hidp_set_timer(session);
kfree_skb(skb);
}
+
+ while ((skb = skb_dequeue(&session->intr_transmit))) {
+ if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->intr_transmit, skb);
+ break;
+ }
+
+ hidp_set_timer(session);
+ kfree_skb(skb);
+ }
}
static int hidp_session(void *arg)
@@ -697,35 +547,35 @@
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb;
+ int vendor = 0x0000, product = 0x0000;
wait_queue_t ctrl_wait, intr_wait;
BT_DBG("session %p", session);
- __module_get(THIS_MODULE);
+ if (session->input) {
+ vendor = session->input->id.vendor;
+ product = session->input->id.product;
+ }
+
+ if (session->hid) {
+ vendor = session->hid->vendor;
+ product = session->hid->product;
+ }
+
+ daemonize("khidpd_%04x%04x", vendor, product);
set_user_nice(current, -15);
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
add_wait_queue(sk_sleep(intr_sk), &intr_wait);
- session->waiting_for_startup = 0;
- wake_up_interruptible(&session->startup_queue);
- set_current_state(TASK_INTERRUPTIBLE);
while (!atomic_read(&session->terminate)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (ctrl_sk->sk_state != BT_CONNECTED ||
intr_sk->sk_state != BT_CONNECTED)
break;
- while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
- skb_orphan(skb);
- if (!skb_linearize(skb))
- hidp_recv_intr_frame(session, skb);
- else
- kfree_skb(skb);
- }
-
- hidp_process_intr_transmit(session);
-
while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
@@ -734,19 +584,22 @@
kfree_skb(skb);
}
- hidp_process_ctrl_transmit(session);
+ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+ skb_orphan(skb);
+ if (!skb_linearize(skb))
+ hidp_recv_intr_frame(session, skb);
+ else
+ kfree_skb(skb);
+ }
+
+ hidp_process_transmit(session);
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- wake_up_interruptible(&session->report_queue);
-
down_write(&hidp_session_sem);
hidp_del_timer(session);
@@ -778,39 +631,35 @@
up_write(&hidp_session_sem);
- kfree(session->rd_data);
kfree(session);
- module_put_and_exit(0);
return 0;
}
-static struct hci_conn *hidp_get_connection(struct hidp_session *session)
+static struct device *hidp_get_device(struct hidp_session *session)
{
bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
- struct hci_conn *conn;
+ struct device *device = NULL;
struct hci_dev *hdev;
hdev = hci_get_route(dst, src);
if (!hdev)
return NULL;
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (conn)
- hci_conn_hold_device(conn);
- hci_dev_unlock(hdev);
+ session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (session->conn)
+ device = &session->conn->dev;
hci_dev_put(hdev);
- return conn;
+ return device;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int i;
+ int err, i;
input = input_allocate_device();
if (!input)
@@ -853,10 +702,16 @@
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
- input->dev.parent = &session->conn->dev;
+ input->dev.parent = hidp_get_device(session);
input->event = hidp_input_event;
+ err = input_register_device(input);
+ if (err < 0) {
+ hci_conn_put_device(session->conn);
+ return err;
+ }
+
return 0;
}
@@ -882,9 +737,6 @@
struct hidp_session *session = hid->driver_data;
struct hid_report *report;
- if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
- return 0;
-
list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
report_list, list)
hidp_send_report(session, report);
@@ -915,8 +767,6 @@
.hidinput_input_event = hidp_hidinput_event,
};
-/* This function sets up the hid device. It does not add it
- to the HID system. That is done in hidp_add_connection(). */
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
@@ -953,14 +803,21 @@
strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
- hid->dev.parent = &session->conn->dev;
+ hid->dev.parent = hidp_get_device(session);
hid->ll_driver = &hidp_hid_driver;
- hid->hid_get_raw_report = hidp_get_raw_report;
hid->hid_output_raw_report = hidp_output_raw_report;
+ err = hid_add_device(hid);
+ if (err < 0)
+ goto failed;
+
return 0;
+failed:
+ hid_destroy_device(hid);
+ session->hid = NULL;
+
fault:
kfree(session->rd_data);
session->rd_data = NULL;
@@ -971,7 +828,6 @@
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
{
struct hidp_session *session, *s;
- int vendor, product;
int err;
BT_DBG("");
@@ -980,28 +836,24 @@
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
+ session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+
BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
down_write(&hidp_session_sem);
s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
if (s && s->state == BT_CONNECTED) {
- up_write(&hidp_session_sem);
- return -EEXIST;
- }
-
- session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
- if (!session) {
- up_write(&hidp_session_sem);
- return -ENOMEM;
+ err = -EEXIST;
+ goto failed;
}
bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
- session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
- l2cap_pi(ctrl_sock->sk)->chan->imtu);
- session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
- l2cap_pi(intr_sock->sk)->chan->imtu);
+ session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu);
+ session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu);
BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
@@ -1009,29 +861,17 @@
session->intr_sock = intr_sock;
session->state = BT_CONNECTED;
- session->conn = hidp_get_connection(session);
- if (!session->conn) {
- err = -ENOTCONN;
- goto failed;
- }
-
setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
skb_queue_head_init(&session->ctrl_transmit);
skb_queue_head_init(&session->intr_transmit);
- mutex_init(&session->report_mutex);
- init_waitqueue_head(&session->report_queue);
- init_waitqueue_head(&session->startup_queue);
- session->waiting_for_startup = 1;
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
- __hidp_link_session(session);
-
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
- if (err)
+ if (err && err != -ENODEV)
goto purge;
}
@@ -1041,42 +881,13 @@
goto purge;
}
+ __hidp_link_session(session);
+
hidp_set_timer(session);
- if (session->hid) {
- vendor = session->hid->vendor;
- product = session->hid->product;
- } else if (session->input) {
- vendor = session->input->id.vendor;
- product = session->input->id.product;
- } else {
- vendor = 0x0000;
- product = 0x0000;
- }
-
- session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
- vendor, product);
- if (IS_ERR(session->task)) {
- err = PTR_ERR(session->task);
+ err = kernel_thread(hidp_session, session, CLONE_KERNEL);
+ if (err < 0)
goto unlink;
- }
-
- while (session->waiting_for_startup) {
- wait_event_interruptible(session->startup_queue,
- !session->waiting_for_startup);
- }
-
- if (session->hid)
- err = hid_add_device(session->hid);
- else
- err = input_register_device(session->input);
-
- if (err < 0) {
- atomic_inc(&session->terminate);
- wake_up_process(session->task);
- up_write(&hidp_session_sem);
- return err;
- }
if (session->input) {
hidp_send_ctrl_message(session,
@@ -1093,6 +904,8 @@
unlink:
hidp_del_timer(session);
+ __hidp_unlink_session(session);
+
if (session->input) {
input_unregister_device(session->input);
session->input = NULL;
@@ -1107,14 +920,13 @@
session->rd_data = NULL;
purge:
- __hidp_unlink_session(session);
-
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
failed:
up_write(&hidp_session_sem);
+ input_free_device(session->input);
kfree(session);
return err;
}
@@ -1138,8 +950,13 @@
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
+ /* Wakeup user-space polling for socket errors */
+ session->intr_sock->sk->sk_err = EUNATCH;
+ session->ctrl_sock->sk->sk_err = EUNATCH;
+
+ /* Kill session thread */
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ hidp_schedule(session);
}
} else
err = -ENOENT;
@@ -1150,16 +967,19 @@
int hidp_get_connlist(struct hidp_connlist_req *req)
{
- struct hidp_session *session;
+ struct list_head *p;
int err = 0, n = 0;
BT_DBG("");
down_read(&hidp_session_sem);
- list_for_each_entry(session, &hidp_session_list, list) {
+ list_for_each(p, &hidp_session_list) {
+ struct hidp_session *session;
struct hidp_conninfo ci;
+ session = list_entry(p, struct hidp_session, list);
+
__hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index af1bcc8..28bb9ce 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -80,8 +80,6 @@
#define HIDP_VIRTUAL_CABLE_UNPLUG 0
#define HIDP_BOOT_PROTOCOL_MODE 1
#define HIDP_BLUETOOTH_VENDOR_ID 9
-#define HIDP_WAITING_FOR_RETURN 10
-#define HIDP_WAITING_FOR_SEND_ACK 11
struct hidp_connadd_req {
int ctrl_sock; /* Connected control socket */
@@ -143,7 +141,6 @@
uint intr_mtu;
atomic_t terminate;
- struct task_struct *task;
unsigned char keys[8];
unsigned char leds;
@@ -157,22 +154,9 @@
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
- /* Used in hidp_get_raw_report() */
- int waiting_report_type; /* HIDP_DATA_RTYPE_* */
- int waiting_report_number; /* -1 for not numbered */
- struct mutex report_mutex;
- struct sk_buff *report_return;
- wait_queue_head_t report_queue;
-
- /* Used in hidp_output_raw_report() */
- int output_report_success; /* boolean */
-
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
-
- wait_queue_head_t startup_queue;
- int waiting_for_startup;
};
static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d7..178ac7f 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -160,10 +160,10 @@
{
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
- u32 uci;
+ uint32_t uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
+ if (get_user(cl.cnum, (uint32_t __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -174,7 +174,7 @@
err = hidp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
err = -EFAULT;
return err;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 0939c72..7719b82 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1,9 +1,8 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
- Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -47,340 +46,536 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
+#include <linux/math64.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/amp.h>
bool disable_ertm;
+bool enable_reconfig;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
-static LIST_HEAD(chan_list);
-static DEFINE_RWLOCK(chan_list_lock);
+struct workqueue_struct *_l2cap_wq;
+
+struct bt_sock_list l2cap_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
+static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u16 result);
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid, u16 result);
+
+static void l2cap_amp_move_setup(struct sock *sk);
+static void l2cap_amp_move_success(struct sock *sk);
+static void l2cap_amp_move_revert(struct sock *sk);
+
+static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
-static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
- void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
-static void l2cap_send_disconn_req(struct l2cap_conn *conn,
- struct l2cap_chan *chan, int err);
+static int l2cap_answer_move_poll(struct sock *sk);
+static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
+static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
+static void l2cap_chan_ready(struct sock *sk);
+static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process);
+static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l);
+static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to);
/* ---- L2CAP channels ---- */
-
-static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
+static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
- struct l2cap_chan *c;
-
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->dcid == cid)
- return c;
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->dcid == cid)
+ break;
}
- return NULL;
+ return s;
}
-static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
+/* Find channel with given DCID.
+ * Returns locked socket */
+static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
+ u16 cid)
{
- struct l2cap_chan *c;
+ struct sock *s;
+ read_lock(&l->lock);
+ s = __l2cap_get_chan_by_dcid(l, cid);
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l->lock);
+ return s;
+}
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->scid == cid)
- return c;
+static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+{
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->scid == cid)
+ break;
}
- return NULL;
+ return s;
}
/* Find channel with given SCID.
* Returns locked socket */
-static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
+static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_scid(conn, cid);
- mutex_unlock(&conn->chan_lock);
-
- return c;
+ struct sock *s;
+ read_lock(&l->lock);
+ s = __l2cap_get_chan_by_scid(l, cid);
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l->lock);
+ return s;
}
-static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
+static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
- struct l2cap_chan *c;
-
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->ident == ident)
- return c;
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->ident == ident)
+ break;
}
+ return s;
+}
+
+static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+{
+ struct sock *s;
+ read_lock(&l->lock);
+ s = __l2cap_get_chan_by_ident(l, ident);
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l->lock);
+ return s;
+}
+
+static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+ u16 seq)
+{
+ struct sk_buff *skb;
+
+ skb_queue_walk(head, skb) {
+ if (bt_cb(skb)->control.txseq == seq)
+ return skb;
+ }
+
return NULL;
}
-static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
- struct l2cap_chan *c;
+ u16 allocSize = 1;
+ int err = 0;
+ int i;
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- mutex_unlock(&conn->chan_lock);
+ /* Actual allocated size must be a power of 2 */
+ while (allocSize && allocSize <= size)
+ allocSize <<= 1;
+ if (!allocSize)
+ return -ENOMEM;
- return c;
-}
+ seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
+ if (!seq_list->list)
+ return -ENOMEM;
-static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
-{
- struct l2cap_chan *c;
+ seq_list->size = allocSize;
+ seq_list->mask = allocSize - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < allocSize; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
- list_for_each_entry(c, &chan_list, global_l) {
- if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
- return c;
- }
- return NULL;
-}
-
-int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
-{
- int err;
-
- write_lock(&chan_list_lock);
-
- if (psm && __l2cap_global_chan_by_addr(psm, src)) {
- err = -EADDRINUSE;
- goto done;
- }
-
- if (psm) {
- chan->psm = psm;
- chan->sport = psm;
- err = 0;
- } else {
- u16 p;
-
- err = -EINVAL;
- for (p = 0x1001; p < 0x1100; p += 2)
- if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
- chan->psm = cpu_to_le16(p);
- chan->sport = cpu_to_le16(p);
- err = 0;
- break;
- }
- }
-
-done:
- write_unlock(&chan_list_lock);
return err;
}
-int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
- write_lock(&chan_list_lock);
-
- chan->scid = scid;
-
- write_unlock(&chan_list_lock);
-
- return 0;
+ kfree(seq_list->list);
}
-static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
+ /* In case someone tries to pop the head of an empty list */
+ BT_DBG("List empty");
+ return L2CAP_SEQ_LIST_CLEAR;
+ } else if (seq_list->head == seq) {
+ /* Head can be removed quickly */
+ BT_DBG("Remove head");
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+ } else {
+ /* Non-head item must be found first */
+ u16 prev = seq_list->head;
+ BT_DBG("Find and remove");
+ while (seq_list->list[prev & mask] != seq) {
+ prev = seq_list->list[prev & mask];
+ if (prev == L2CAP_SEQ_LIST_TAIL) {
+ BT_DBG("seq %d not in list", (int) seq);
+ return L2CAP_SEQ_LIST_CLEAR;
+ }
+ }
+
+ seq_list->list[prev & mask] = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+ if (seq_list->tail == seq)
+ seq_list->tail = prev;
+ }
+ return seq;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ return l2cap_seq_list_remove(seq_list, seq_list->head);
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
+ u16 i;
+ for (i = 0; i < seq_list->size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
+
+ if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+ }
+}
+
+static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
+{
+ u16 packed;
+
+ packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
+ L2CAP_CTRL_REQSEQ;
+ packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
+ L2CAP_CTRL_FINAL;
+
+ if (control->frame_type == 's') {
+ packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
+ L2CAP_CTRL_POLL;
+ packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
+ L2CAP_CTRL_SUPERVISE;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
+ L2CAP_CTRL_SAR;
+ packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
+ L2CAP_CTRL_TXSEQ;
+ }
+
+ return packed;
+}
+
+static void __get_enhanced_control(u16 enhanced,
+ struct bt_l2cap_control *control)
+{
+ control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
+ L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enhanced & L2CAP_CTRL_FINAL) >>
+ L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
+ control->frame_type = 's';
+ control->poll = (enhanced & L2CAP_CTRL_POLL) >>
+ L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
+ L2CAP_CTRL_SUPERVISE_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ control->frame_type = 'i';
+ control->sar = (enhanced & L2CAP_CTRL_SAR) >>
+ L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
+ L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static u32 __pack_extended_control(struct bt_l2cap_control *control)
+{
+ u32 packed;
+
+ packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
+ L2CAP_EXT_CTRL_REQSEQ;
+ packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
+ L2CAP_EXT_CTRL_FINAL;
+
+ if (control->frame_type == 's') {
+ packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
+ L2CAP_EXT_CTRL_POLL;
+ packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
+ L2CAP_EXT_CTRL_SUPERVISE;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
+ L2CAP_EXT_CTRL_SAR;
+ packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
+ L2CAP_EXT_CTRL_TXSEQ;
+ }
+
+ return packed;
+}
+
+static void __get_extended_control(u32 extended,
+ struct bt_l2cap_control *control)
+{
+ control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
+ L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
+ L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ control->frame_type = 's';
+ control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
+ L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
+ L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ control->frame_type = 'i';
+ control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
+ L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
+ L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ __cancel_delayed_work(&pi->ack_work);
+}
+
+static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
+ if (!delayed_work_pending(&pi->ack_work)) {
+ queue_delayed_work(_l2cap_wq, &pi->ack_work,
+ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
+ }
+}
+
+static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ __cancel_delayed_work(&pi->retrans_work);
+}
+
+static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
+ __cancel_delayed_work(&pi->retrans_work);
+ queue_delayed_work(_l2cap_wq, &pi->retrans_work,
+ msecs_to_jiffies(pi->retrans_timeout));
+ }
+}
+
+static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ __cancel_delayed_work(&pi->monitor_work);
+}
+
+static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ l2cap_ertm_stop_retrans_timer(pi);
+ __cancel_delayed_work(&pi->monitor_work);
+ if (pi->monitor_timeout) {
+ queue_delayed_work(_l2cap_wq, &pi->monitor_work,
+ msecs_to_jiffies(pi->monitor_timeout));
+ }
+}
+
+static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
u16 cid = L2CAP_CID_DYN_START;
for (; cid < L2CAP_CID_DYN_END; cid++) {
- if (!__l2cap_get_chan_by_scid(conn, cid))
+ if (!__l2cap_get_chan_by_scid(l, cid))
return cid;
}
return 0;
}
-static void __l2cap_state_change(struct l2cap_chan *chan, int state)
+static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
- BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
- state_to_string(state));
+ sock_hold(sk);
- chan->state = state;
- chan->ops->state_change(chan->data, state);
+ if (l->head)
+ l2cap_pi(l->head)->prev_c = sk;
+
+ l2cap_pi(sk)->next_c = l->head;
+ l2cap_pi(sk)->prev_c = NULL;
+ l->head = sk;
}
-static void l2cap_state_change(struct l2cap_chan *chan, int state)
+static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
- struct sock *sk = chan->sk;
+ struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
- lock_sock(sk);
- __l2cap_state_change(chan, state);
- release_sock(sk);
+ write_lock_bh(&l->lock);
+ if (sk == l->head)
+ l->head = next;
+
+ if (next)
+ l2cap_pi(next)->prev_c = prev;
+ if (prev)
+ l2cap_pi(prev)->next_c = next;
+ write_unlock_bh(&l->lock);
+
+ __sock_put(sk);
}
-static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
- struct sock *sk = chan->sk;
+ struct l2cap_chan_list *l = &conn->chan_list;
- sk->sk_err = err;
-}
-
-static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
-{
- struct sock *sk = chan->sk;
-
- lock_sock(sk);
- __l2cap_chan_set_err(chan, err);
- release_sock(sk);
-}
-
-static void l2cap_chan_timeout(struct work_struct *work)
-{
- struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- chan_timer.work);
- struct l2cap_conn *conn = chan->conn;
- int reason;
-
- BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
-
- mutex_lock(&conn->chan_lock);
- l2cap_chan_lock(chan);
-
- if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
- reason = ECONNREFUSED;
- else if (chan->state == BT_CONNECT &&
- chan->sec_level != BT_SECURITY_SDP)
- reason = ECONNREFUSED;
- else
- reason = ETIMEDOUT;
-
- l2cap_chan_close(chan, reason);
-
- l2cap_chan_unlock(chan);
-
- chan->ops->close(chan->data);
- mutex_unlock(&conn->chan_lock);
-
- l2cap_chan_put(chan);
-}
-
-struct l2cap_chan *l2cap_chan_create(struct sock *sk)
-{
- struct l2cap_chan *chan;
-
- chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
- if (!chan)
- return NULL;
-
- mutex_init(&chan->lock);
-
- chan->sk = sk;
-
- write_lock(&chan_list_lock);
- list_add(&chan->global_l, &chan_list);
- write_unlock(&chan_list_lock);
-
- INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
-
- chan->state = BT_OPEN;
-
- atomic_set(&chan->refcnt, 1);
-
- BT_DBG("sk %p chan %p", sk, chan);
-
- return chan;
-}
-
-void l2cap_chan_destroy(struct l2cap_chan *chan)
-{
- write_lock(&chan_list_lock);
- list_del(&chan->global_l);
- write_unlock(&chan_list_lock);
-
- l2cap_chan_put(chan);
-}
-
-void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
-{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- chan->psm, chan->dcid);
+ l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
- conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
+ conn->disc_reason = 0x13;
- chan->conn = conn;
+ l2cap_pi(sk)->conn = conn;
- switch (chan->chan_type) {
- case L2CAP_CHAN_CONN_ORIENTED:
+ if (!l2cap_pi(sk)->fixed_channel &&
+ (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
if (conn->hcon->type == LE_LINK) {
/* LE connection */
- chan->omtu = L2CAP_LE_DEFAULT_MTU;
- chan->scid = L2CAP_CID_LE_DATA;
- chan->dcid = L2CAP_CID_LE_DATA;
+ if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
+ if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
+
+ l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
+ l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
} else {
/* Alloc CID for connection-oriented socket */
- chan->scid = l2cap_alloc_cid(conn);
- chan->omtu = L2CAP_DEFAULT_MTU;
+ l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
}
- break;
-
- case L2CAP_CHAN_CONN_LESS:
+ } else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
- chan->scid = L2CAP_CID_CONN_LESS;
- chan->dcid = L2CAP_CID_CONN_LESS;
- chan->omtu = L2CAP_DEFAULT_MTU;
- break;
-
- default:
+ l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
+ l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+ } else if (sk->sk_type == SOCK_RAW) {
/* Raw socket can send/recv signalling messages only */
- chan->scid = L2CAP_CID_SIGNALING;
- chan->dcid = L2CAP_CID_SIGNALING;
- chan->omtu = L2CAP_DEFAULT_MTU;
+ l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
+ l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
}
- chan->local_id = L2CAP_BESTEFFORT_ID;
- chan->local_stype = L2CAP_SERV_BESTEFFORT;
- chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
- chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
- chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
- chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ if (l2cap_get_smallest_flushto(l) > l2cap_pi(sk)->flush_to) {
+ /*if flush timeout of the channel is lesser than existing */
+ l2cap_set_acl_flushto(conn->hcon, l2cap_pi(sk)->flush_to);
+ }
+ /* Otherwise, do not set scid/dcid/omtu. These will be set up
+ * by l2cap_fixed_channel_config()
+ */
- l2cap_chan_hold(chan);
-
- list_add(&chan->list, &conn->chan_l);
+ __l2cap_chan_link(l, sk);
}
-void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+/* Delete channel.
+ * Must be called on the locked socket. */
+void l2cap_chan_del(struct sock *sk, int err)
{
- mutex_lock(&conn->chan_lock);
- __l2cap_chan_add(conn, chan);
- mutex_unlock(&conn->chan_lock);
-}
-
-static void l2cap_chan_del(struct l2cap_chan *chan, int err)
-{
- struct sock *sk = chan->sk;
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
- __clear_chan_timer(chan);
+ l2cap_sock_clear_timer(sk);
- BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
+ BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
- /* Delete from channel list */
- list_del(&chan->list);
+ struct l2cap_chan_list *l = &conn->chan_list;
+ /* Unlink from channel list */
+ l2cap_chan_unlink(l, sk);
+ l2cap_pi(sk)->conn = NULL;
+ if (!l2cap_pi(sk)->fixed_channel)
+ hci_conn_put(conn->hcon);
- l2cap_chan_put(chan);
-
- chan->conn = NULL;
- hci_conn_put(conn->hcon);
+ read_lock(&l->lock);
+ if (l2cap_pi(sk)->flush_to < l2cap_get_smallest_flushto(l))
+ l2cap_set_acl_flushto(conn->hcon,
+ l2cap_get_smallest_flushto(l));
+ read_unlock(&l->lock);
}
- lock_sock(sk);
+ if (l2cap_pi(sk)->ampchan) {
+ struct hci_chan *ampchan = l2cap_pi(sk)->ampchan;
+ struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
+ l2cap_pi(sk)->ampchan = NULL;
+ l2cap_pi(sk)->ampcon = NULL;
+ l2cap_pi(sk)->amp_id = 0;
+ if (hci_chan_put(ampchan))
+ ampcon->l2cap_data = NULL;
+ else
+ l2cap_deaggregate(ampchan, l2cap_pi(sk));
+ }
- __l2cap_state_change(chan, BT_CLOSED);
+ sk->sk_state = BT_CLOSED;
sock_set_flag(sk, SOCK_ZAPPED);
if (err)
- __l2cap_chan_set_err(chan, err);
+ sk->sk_err = err;
if (parent) {
bt_accept_unlink(sk);
@@ -388,118 +583,25 @@
} else
sk->sk_state_change(sk);
- release_sock(sk);
+ sk->sk_send_head = NULL;
+ skb_queue_purge(TX_QUEUE(sk));
- if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
- test_bit(CONF_INPUT_DONE, &chan->conf_state)))
- return;
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+ if (l2cap_pi(sk)->sdu)
+ kfree_skb(l2cap_pi(sk)->sdu);
- skb_queue_purge(&chan->tx_q);
+ skb_queue_purge(SREJ_QUEUE(sk));
- if (chan->mode == L2CAP_MODE_ERTM) {
- struct srej_list *l, *tmp;
-
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
-
- skb_queue_purge(&chan->srej_q);
-
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- list_del(&l->list);
- kfree(l);
- }
+ __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
}
}
-static void l2cap_chan_cleanup_listen(struct sock *parent)
+static inline u8 l2cap_get_auth_type(struct sock *sk)
{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL))) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-
- l2cap_chan_lock(chan);
- __clear_chan_timer(chan);
- l2cap_chan_close(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
-
- chan->ops->close(chan->data);
- }
-}
-
-void l2cap_chan_close(struct l2cap_chan *chan, int reason)
-{
- struct l2cap_conn *conn = chan->conn;
- struct sock *sk = chan->sk;
-
- BT_DBG("chan %p state %s sk %p", chan,
- state_to_string(chan->state), sk);
-
- switch (chan->state) {
- case BT_LISTEN:
- lock_sock(sk);
- l2cap_chan_cleanup_listen(sk);
-
- __l2cap_state_change(chan, BT_CLOSED);
- sock_set_flag(sk, SOCK_ZAPPED);
- release_sock(sk);
- break;
-
- case BT_CONNECTED:
- case BT_CONFIG:
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
- conn->hcon->type == ACL_LINK) {
- __clear_chan_timer(chan);
- __set_chan_timer(chan, sk->sk_sndtimeo);
- l2cap_send_disconn_req(conn, chan, reason);
- } else
- l2cap_chan_del(chan, reason);
- break;
-
- case BT_CONNECT2:
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
- conn->hcon->type == ACL_LINK) {
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (bt_sk(sk)->defer_setup)
- result = L2CAP_CR_SEC_BLOCK;
- else
- result = L2CAP_CR_BAD_PSM;
- l2cap_state_change(chan, BT_DISCONN);
-
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
- sizeof(rsp), &rsp);
- }
-
- l2cap_chan_del(chan, reason);
- break;
-
- case BT_CONNECT:
- case BT_DISCONN:
- l2cap_chan_del(chan, reason);
- break;
-
- default:
- lock_sock(sk);
- sock_set_flag(sk, SOCK_ZAPPED);
- release_sock(sk);
- break;
- }
-}
-
-static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
-{
- if (chan->chan_type == L2CAP_CHAN_RAW) {
- switch (chan->sec_level) {
+ if (sk->sk_type == SOCK_RAW) {
+ switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_DEDICATED_BONDING_MITM;
case BT_SECURITY_MEDIUM:
@@ -507,16 +609,16 @@
default:
return HCI_AT_NO_BONDING;
}
- } else if (chan->psm == cpu_to_le16(0x0001)) {
- if (chan->sec_level == BT_SECURITY_LOW)
- chan->sec_level = BT_SECURITY_SDP;
+ } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- if (chan->sec_level == BT_SECURITY_HIGH)
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
return HCI_AT_NO_BONDING_MITM;
else
return HCI_AT_NO_BONDING;
} else {
- switch (chan->sec_level) {
+ switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
@@ -528,17 +630,18 @@
}
/* Service level security */
-int l2cap_chan_check_security(struct l2cap_chan *chan)
+static inline int l2cap_check_security(struct sock *sk)
{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
__u8 auth_type;
- auth_type = l2cap_get_auth_type(chan);
+ auth_type = l2cap_get_auth_type(sk);
- return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
+ return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
+ auth_type);
}
-static u8 l2cap_get_ident(struct l2cap_conn *conn)
+u8 l2cap_get_ident(struct l2cap_conn *conn)
{
u8 id;
@@ -548,19 +651,46 @@
* 200 - 254 are used by utilities like l2ping, etc.
*/
- spin_lock(&conn->lock);
+ spin_lock_bh(&conn->lock);
if (++conn->tx_ident > 128)
conn->tx_ident = 1;
id = conn->tx_ident;
- spin_unlock(&conn->lock);
+ spin_unlock_bh(&conn->lock);
return id;
}
-static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void apply_fcs(struct sk_buff *skb)
+{
+ size_t len;
+ u16 partial_crc;
+ struct sk_buff *iter;
+ struct sk_buff *final_frag = skb;
+
+ if (skb_has_frag_list(skb))
+ len = skb_headlen(skb);
+ else
+ len = skb->len - L2CAP_FCS_SIZE;
+
+ partial_crc = crc16(0, (u8 *) skb->data, len);
+
+ skb_walk_frags(skb, iter) {
+ len = iter->len;
+ if (!iter->next)
+ len -= L2CAP_FCS_SIZE;
+
+ partial_crc = crc16(partial_crc, iter->data, len);
+ final_frag = iter;
+ }
+
+ put_unaligned_le16(partial_crc,
+ final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
+}
+
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
u8 flags;
@@ -575,123 +705,60 @@
else
flags = ACL_START;
- bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
- skb->priority = HCI_PRIO_MAX;
+ bt_cb(skb)->force_active = 1;
- hci_send_acl(conn->hchan, skb, flags);
+ hci_send_acl(conn->hcon, NULL, skb, flags);
}
-static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+static inline int __l2cap_no_conn_pending(struct sock *sk)
{
- struct hci_conn *hcon = chan->conn->hcon;
- u16 flags;
-
- BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
- skb->priority);
-
- if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
- lmp_no_flush_capable(hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
- hci_send_acl(chan->conn->hchan, skb, flags);
+ return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
+static void l2cap_send_conn_req(struct sock *sk)
{
- struct sk_buff *skb;
- struct l2cap_hdr *lh;
- struct l2cap_conn *conn = chan->conn;
- int count, hlen;
-
- if (chan->state != BT_CONNECTED)
- return;
-
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hlen = L2CAP_EXT_HDR_SIZE;
- else
- hlen = L2CAP_ENH_HDR_SIZE;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += L2CAP_FCS_SIZE;
-
- BT_DBG("chan %p, control 0x%8.8x", chan, control);
-
- count = min_t(unsigned int, conn->mtu, hlen);
-
- control |= __set_sframe(chan);
-
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
-
- if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= __set_ctrl_poll(chan);
-
- skb = bt_skb_alloc(count, GFP_ATOMIC);
- if (!skb)
- return;
-
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
-
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
- }
-
- skb->priority = HCI_PRIO_MAX;
- l2cap_do_send(chan, skb);
-}
-
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
-{
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- } else
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
-
- control |= __set_reqseq(chan, chan->buffer_seq);
-
- l2cap_send_sframe(chan, control);
-}
-
-static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
-{
- return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
-}
-
-static void l2cap_send_conn_req(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
struct l2cap_conn_req req;
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ req.psm = l2cap_pi(sk)->psm;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
+ l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
- chan->ident = l2cap_get_ident(conn);
-
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
-
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_REQ, sizeof(req), &req);
}
-static void l2cap_do_start(struct l2cap_chan *chan)
+static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_create_chan_req req;
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ req.psm = l2cap_pi(sk)->psm;
+ req.amp_id = amp_id;
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
+ l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
+
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
+}
+
+static void l2cap_do_start(struct sock *sk)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_chan_check_security(chan) &&
- __l2cap_no_conn_pending(chan))
- l2cap_send_conn_req(chan);
+ if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+
+ if (l2cap_pi(sk)->amp_pref ==
+ BT_AMP_POLICY_PREFER_AMP &&
+ conn->fc_mask & L2CAP_FC_A2MP)
+ amp_create_physical(conn, sk);
+ else
+ l2cap_send_conn_req(sk);
+ }
} else {
struct l2cap_info_req req;
req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -699,7 +766,8 @@
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
+ mod_timer(&conn->info_timer, jiffies +
+ msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(req), &req);
@@ -722,75 +790,90 @@
}
}
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
- struct sock *sk = chan->sk;
struct l2cap_disconn_req req;
if (!conn)
return;
- if (chan->mode == L2CAP_MODE_ERTM) {
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
+ sk->sk_send_head = NULL;
+ skb_queue_purge(TX_QUEUE(sk));
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+ skb_queue_purge(SREJ_QUEUE(sk));
+
+ __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
}
- req.dcid = cpu_to_le16(chan->dcid);
- req.scid = cpu_to_le16(chan->scid);
+ req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
- lock_sock(sk);
- __l2cap_state_change(chan, BT_DISCONN);
- __l2cap_chan_set_err(chan, err);
- release_sock(sk);
+ sk->sk_state = BT_DISCONN;
+ sk->sk_err = err;
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan, *tmp;
+ struct l2cap_chan_list *l = &conn->chan_list;
+ struct sock_del_list del, *tmp1, *tmp2;
+ struct sock *sk;
BT_DBG("conn %p", conn);
- mutex_lock(&conn->chan_lock);
+ INIT_LIST_HEAD(&del.list);
- list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
+ read_lock(&l->lock);
- l2cap_chan_lock(chan);
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ bh_lock_sock(sk);
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- l2cap_chan_unlock(chan);
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
+ bh_unlock_sock(sk);
continue;
}
- if (chan->state == BT_CONNECT) {
- if (!l2cap_chan_check_security(chan) ||
- !__l2cap_no_conn_pending(chan)) {
- l2cap_chan_unlock(chan);
+ if (sk->sk_state == BT_CONNECT) {
+ if (!l2cap_check_security(sk) ||
+ !__l2cap_no_conn_pending(sk)) {
+ bh_unlock_sock(sk);
continue;
}
- if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
- && test_bit(CONF_STATE2_DEVICE,
- &chan->conf_state)) {
- l2cap_chan_close(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
+ if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
+ conn->feat_mask)
+ && l2cap_pi(sk)->conf_state &
+ L2CAP_CONF_STATE2_DEVICE) {
+ tmp1 = kzalloc(sizeof(struct sock_del_list),
+ GFP_ATOMIC);
+ tmp1->sk = sk;
+ list_add_tail(&tmp1->list, &del.list);
+ bh_unlock_sock(sk);
continue;
}
- l2cap_send_conn_req(chan);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
- } else if (chan->state == BT_CONNECT2) {
+ if (l2cap_pi(sk)->amp_pref ==
+ BT_AMP_POLICY_PREFER_AMP &&
+ conn->fc_mask & L2CAP_FC_A2MP)
+ amp_create_physical(conn, sk);
+ else
+ l2cap_send_conn_req(sk);
+
+ } else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
char buf[128];
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- if (l2cap_chan_check_security(chan)) {
- lock_sock(sk);
+ if (l2cap_check_security(sk)) {
if (bt_sk(sk)->defer_setup) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -799,86 +882,133 @@
parent->sk_data_ready(parent, 0);
} else {
- __l2cap_state_change(chan, BT_CONFIG);
+ sk->sk_state = BT_CONFIG;
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
}
- release_sock(sk);
} else {
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
}
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
- sizeof(rsp), &rsp);
-
- if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
- rsp.result != L2CAP_CR_SUCCESS) {
- l2cap_chan_unlock(chan);
+ if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
+ l2cap_pi(sk)->amp_id) {
+ amp_accept_physical(conn,
+ l2cap_pi(sk)->amp_id, sk);
+ bh_unlock_sock(sk);
continue;
}
- set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
+ rsp.result != L2CAP_CR_SUCCESS) {
+ bh_unlock_sock(sk);
+ continue;
+ }
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
- l2cap_chan_unlock(chan);
+ bh_unlock_sock(sk);
}
- mutex_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
+
+ list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
+ bh_lock_sock(tmp1->sk);
+ __l2cap_sock_close(tmp1->sk, ECONNRESET);
+ bh_unlock_sock(tmp1->sk);
+ list_del(&tmp1->list);
+ kfree(tmp1);
+ }
+}
+
+/* Find socket with fixed cid with given source and destination bdaddrs.
+ * Direction of the req/rsp must match.
+ */
+struct sock *l2cap_find_sock_by_fixed_cid_and_dir(__le16 cid, bdaddr_t *src,
+ bdaddr_t *dst, int incoming)
+{
+ struct sock *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
+
+ BT_DBG(" %d", incoming);
+
+ read_lock(&l2cap_sk_list.lock);
+
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+
+ if (incoming && !l2cap_pi(sk)->incoming)
+ continue;
+
+ if (!incoming && l2cap_pi(sk)->incoming)
+ continue;
+
+ if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
+ /* Exact match. */
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
+
+ /* Closest match */
+ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ sk1 = sk;
+ }
+ }
+
+ read_unlock(&l2cap_sk_list.lock);
+
+ return node ? sk : sk1;
}
/* Find socket with cid and source bdaddr.
* Returns closest match, locked.
*/
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
- struct l2cap_chan *c, *c1 = NULL;
+ struct sock *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
- read_lock(&chan_list_lock);
+ read_lock(&l2cap_sk_list.lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
- if (state && c->state != state)
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (state && sk->sk_state != state)
continue;
- if (c->scid == cid) {
+ if (l2cap_pi(sk)->scid == cid) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
- read_unlock(&chan_list_lock);
- return c;
- }
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- c1 = c;
+ sk1 = sk;
}
}
- read_unlock(&chan_list_lock);
+ read_unlock(&l2cap_sk_list.lock);
- return c1;
+ return node ? sk : sk1;
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
- struct sock *parent, *sk;
- struct l2cap_chan *chan, *pchan;
+ struct l2cap_chan_list *list = &conn->chan_list;
+ struct sock *parent, *uninitialized_var(sk);
BT_DBG("");
/* Check if we have socket listening on cid */
- pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+ parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
conn->src);
- if (!pchan)
+ if (!parent)
return;
- parent = pchan->sk;
-
- lock_sock(parent);
+ bh_lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
@@ -886,113 +1016,101 @@
goto clean;
}
- chan = pchan->ops->new_connection(pchan->data);
- if (!chan)
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+ if (!sk)
goto clean;
- sk = chan->sk;
+ write_lock_bh(&list->lock);
hci_conn_hold(conn->hcon);
+ l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
+ l2cap_pi(sk)->incoming = 1;
bt_accept_enqueue(parent, sk);
- l2cap_chan_add(conn, chan);
+ __l2cap_chan_add(conn, sk);
- __set_chan_timer(chan, sk->sk_sndtimeo);
-
- __l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state = BT_CONNECTED;
parent->sk_data_ready(parent, 0);
+ write_unlock_bh(&list->lock);
+
clean:
- release_sock(parent);
-}
-
-static void l2cap_chan_ready(struct l2cap_chan *chan)
-{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- chan->conf_state = 0;
- __clear_chan_timer(chan);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
-
- release_sock(sk);
+ bh_unlock_sock(parent);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan_list *l = &conn->chan_list;
+ struct sock *sk;
BT_DBG("conn %p", conn);
if (!conn->hcon->out && conn->hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
- if (conn->hcon->out && conn->hcon->type == LE_LINK)
- smp_conn_security(conn, conn->hcon->pending_sec_level);
+ read_lock(&l->lock);
- mutex_lock(&conn->chan_lock);
+ if (l->head) {
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ bh_lock_sock(sk);
- list_for_each_entry(chan, &conn->chan_l, list) {
+ if (conn->hcon->type == LE_LINK) {
+ u8 sec_level = l2cap_pi(sk)->sec_level;
+ u8 pending_sec = conn->hcon->pending_sec_level;
- l2cap_chan_lock(chan);
+ if (pending_sec > sec_level)
+ sec_level = pending_sec;
- if (conn->hcon->type == LE_LINK) {
- if (smp_conn_security(conn, chan->sec_level))
- l2cap_chan_ready(chan);
+ if (smp_conn_security(conn, sec_level))
+ l2cap_chan_ready(sk);
- } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- struct sock *sk = chan->sk;
- __clear_chan_timer(chan);
- lock_sock(sk);
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
- release_sock(sk);
+ hci_conn_put(conn->hcon);
- } else if (chan->state == BT_CONNECT)
- l2cap_do_start(chan);
+ } else if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
+ l2cap_sock_clear_timer(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else if (sk->sk_state == BT_CONNECT)
+ l2cap_do_start(sk);
- l2cap_chan_unlock(chan);
+ bh_unlock_sock(sk);
+ }
+ } else if (conn->hcon->type == LE_LINK) {
+ smp_conn_security(conn, BT_SECURITY_HIGH);
}
- mutex_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
+
+ if (conn->hcon->out && conn->hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
}
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan_list *l = &conn->chan_list;
+ struct sock *sk;
BT_DBG("conn %p", conn);
- mutex_lock(&conn->chan_lock);
+ read_lock(&l->lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
- if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
- __l2cap_chan_set_err(chan, err);
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ if (l2cap_pi(sk)->force_reliable)
+ sk->sk_err = err;
}
- mutex_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
}
-static void l2cap_info_timeout(struct work_struct *work)
+static void l2cap_info_timeout(unsigned long arg)
{
- struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
- info_timer.work);
+ struct l2cap_conn *conn = (void *) arg;
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -1000,78 +1118,21 @@
l2cap_conn_start(conn);
}
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
- struct l2cap_conn *conn = hcon->l2cap_data;
- struct l2cap_chan *chan, *l;
-
- if (!conn)
- return;
-
- BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
- kfree_skb(conn->rx_skb);
-
- mutex_lock(&conn->chan_lock);
-
- /* Kill channels */
- list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
- l2cap_chan_lock(chan);
-
- l2cap_chan_del(chan, err);
-
- l2cap_chan_unlock(chan);
-
- chan->ops->close(chan->data);
- }
-
- mutex_unlock(&conn->chan_lock);
-
- hci_chan_del(conn->hchan);
-
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
- cancel_delayed_work_sync(&conn->info_timer);
-
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
- cancel_delayed_work_sync(&conn->security_timer);
- smp_chan_destroy(conn);
- }
-
- hcon->l2cap_data = NULL;
- kfree(conn);
-}
-
-static void security_timeout(struct work_struct *work)
-{
- struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
- security_timer.work);
-
- l2cap_conn_del(conn->hcon, ETIMEDOUT);
-}
-
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn = hcon->l2cap_data;
- struct hci_chan *hchan;
if (conn || status)
return conn;
- hchan = hci_chan_create(hcon);
- if (!hchan)
- return NULL;
-
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
- if (!conn) {
- hci_chan_del(hchan);
+ if (!conn)
return NULL;
- }
hcon->l2cap_data = conn;
conn->hcon = hcon;
- conn->hchan = hchan;
- BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+ BT_DBG("hcon %p conn %p", hcon, conn);
if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
conn->mtu = hcon->hdev->le_mtu;
@@ -1084,59 +1145,110 @@
conn->feat_mask = 0;
spin_lock_init(&conn->lock);
- mutex_init(&conn->chan_lock);
-
- INIT_LIST_HEAD(&conn->chan_l);
+ rwlock_init(&conn->chan_list.lock);
if (hcon->type == LE_LINK)
- INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
+ setup_timer(&hcon->smp_timer, smp_timeout,
+ (unsigned long) conn);
else
- INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
+ setup_timer(&conn->info_timer, l2cap_info_timeout,
+ (unsigned long) conn);
- conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
+ conn->disc_reason = 0x13;
return conn;
}
+static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct sock *sk;
+ struct sock *next;
+
+ if (!conn)
+ return;
+
+ BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+ if ((conn->hcon == hcon) && (conn->rx_skb))
+ kfree_skb(conn->rx_skb);
+
+ BT_DBG("conn->hcon %p", conn->hcon);
+
+ /* Kill channels */
+ for (sk = conn->chan_list.head; sk; ) {
+ BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
+ if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
+ next = l2cap_pi(sk)->next_c;
+ if (is_process)
+ lock_sock(sk);
+ else
+ bh_lock_sock(sk);
+ l2cap_chan_del(sk, err);
+ if (is_process)
+ release_sock(sk);
+ else
+ bh_unlock_sock(sk);
+ l2cap_sock_kill(sk);
+ sk = next;
+ } else
+ sk = l2cap_pi(sk)->next_c;
+ }
+
+ if (conn->hcon == hcon) {
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+ del_timer_sync(&conn->info_timer);
+
+ hcon->l2cap_data = NULL;
+
+ kfree(conn);
+ }
+}
+
+static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
+{
+ struct l2cap_chan_list *l = &conn->chan_list;
+ write_lock_bh(&l->lock);
+ __l2cap_chan_add(conn, sk);
+ write_unlock_bh(&l->lock);
+}
+
/* ---- Socket interface ---- */
/* Find socket with psm and source bdaddr.
* Returns closest match.
*/
-static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
- struct l2cap_chan *c, *c1 = NULL;
+ struct sock *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
- read_lock(&chan_list_lock);
+ read_lock(&l2cap_sk_list.lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
- if (state && c->state != state)
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (state && sk->sk_state != state)
continue;
- if (c->psm == psm) {
+ if (l2cap_pi(sk)->psm == psm) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
- read_unlock(&chan_list_lock);
- return c;
- }
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- c1 = c;
+ sk1 = sk;
}
}
- read_unlock(&chan_list_lock);
+ read_unlock(&l2cap_sk_list.lock);
- return c1;
+ return node ? sk : sk1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_do_connect(struct sock *sk)
{
- struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
+ bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -1144,136 +1256,104 @@
int err;
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- chan->psm);
+ l2cap_pi(sk)->psm);
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
- l2cap_chan_lock(chan);
+ auth_type = l2cap_get_auth_type(sk);
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
- chan->chan_type != L2CAP_CHAN_RAW) {
- err = -EINVAL;
- goto done;
- }
+ if (l2cap_pi(sk)->fixed_channel) {
+ /* Fixed channels piggyback on existing ACL connections */
+ hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (!hcon || !hcon->l2cap_data) {
+ err = -ENOTCONN;
+ goto done;
+ }
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
- err = -EINVAL;
- goto done;
- }
+ conn = hcon->l2cap_data;
+ } else {
+ if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+ hcon = hci_le_connect(hdev, 0, dst,
+ l2cap_pi(sk)->sec_level, auth_type,
+ &bt_sk(sk)->le_params);
+ else
+ hcon = hci_connect(hdev, ACL_LINK, 0, dst,
+ l2cap_pi(sk)->sec_level, auth_type);
- switch (chan->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto done;
+ }
- lock_sock(sk);
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- err = 0;
- release_sock(sk);
- goto done;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- release_sock(sk);
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- release_sock(sk);
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, dst);
-
- release_sock(sk);
-
- chan->psm = psm;
- chan->dcid = cid;
-
- auth_type = l2cap_get_auth_type(chan);
-
- if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, 0, dst,
- chan->sec_level, auth_type);
- else
- hcon = hci_connect(hdev, ACL_LINK, 0, dst,
- chan->sec_level, auth_type);
-
- if (IS_ERR(hcon)) {
- err = PTR_ERR(hcon);
- goto done;
- }
-
- conn = l2cap_conn_add(hcon, 0);
- if (!conn) {
- hci_conn_put(hcon);
- err = -ENOMEM;
- goto done;
+ conn = l2cap_conn_add(hcon, 0);
+ if (!conn) {
+ hci_conn_put(hcon);
+ err = -ENOMEM;
+ goto done;
+ }
}
/* Update source addr of the socket */
bacpy(src, conn->src);
- l2cap_chan_unlock(chan);
- l2cap_chan_add(conn, chan);
- l2cap_chan_lock(chan);
+ l2cap_chan_add(conn, sk);
- l2cap_state_change(chan, BT_CONNECT);
- __set_chan_timer(chan, sk->sk_sndtimeo);
+ if ((l2cap_pi(sk)->fixed_channel) ||
+ (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
+ hcon->state == BT_CONNECTED)) {
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else {
+ sk->sk_state = BT_CONNECT;
+ /* If we have valid LE Params, let timeout override default */
+ if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
+ l2cap_sock_le_params_valid(&bt_sk(sk)->le_params)) {
+ u16 timeout = bt_sk(sk)->le_params.conn_timeout;
- if (hcon->state == BT_CONNECTED) {
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- __clear_chan_timer(chan);
- if (l2cap_chan_check_security(chan))
- l2cap_state_change(chan, BT_CONNECTED);
+ if (timeout)
+ l2cap_sock_set_timer(sk,
+ msecs_to_jiffies(timeout*1000));
} else
- l2cap_do_start(chan);
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+ sk->sk_state_change(sk);
+
+ if (hcon->state == BT_CONNECTED) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
+ l2cap_sock_clear_timer(sk);
+ if (l2cap_check_security(sk)) {
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ }
+ } else
+ l2cap_do_start(sk);
+ }
}
err = 0;
done:
- l2cap_chan_unlock(chan);
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
int __l2cap_wait_ack(struct sock *sk)
{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
int timeo = HZ/5;
add_wait_queue(sk_sleep(sk), &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (chan->unacked_frames > 0 && chan->conn) {
+ while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
+ atomic_read(&l2cap_pi(sk)->ertm_queued)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (!timeo)
timeo = HZ/5;
@@ -1285,7 +1365,6 @@
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
@@ -1296,325 +1375,349 @@
return err;
}
-static void l2cap_monitor_timeout(struct work_struct *work)
+static void l2cap_ertm_tx_worker(struct work_struct *work)
{
- struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- monitor_timer.work);
+ struct l2cap_pinfo *pi =
+ container_of(work, struct l2cap_pinfo, tx_work);
+ struct sock *sk = (struct sock *)pi;
+ BT_DBG("%p", pi);
- BT_DBG("chan %p", chan);
-
- l2cap_chan_lock(chan);
-
- if (chan->retry_count >= chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
- return;
- }
-
- chan->retry_count++;
- __set_monitor_timer(chan);
-
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
+ lock_sock(sk);
+ l2cap_ertm_send(sk);
+ release_sock(sk);
+ sock_put(sk);
}
-static void l2cap_retrans_timeout(struct work_struct *work)
+static void l2cap_skb_destructor(struct sk_buff *skb)
{
- struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- retrans_timer.work);
+ struct sock *sk = skb->sk;
+ int queued;
+ int keep_sk = 0;
- BT_DBG("chan %p", chan);
+ queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
+ if (queued < L2CAP_MIN_ERTM_QUEUED)
+ keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
- l2cap_chan_lock(chan);
-
- chan->retry_count = 1;
- __set_monitor_timer(chan);
-
- set_bit(CONN_WAIT_F, &chan->conn_state);
-
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
+ if (!keep_sk)
+ sock_put(sk);
}
-static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
- struct sk_buff *skb;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
- while ((skb = skb_peek(&chan->tx_q)) &&
- chan->unacked_frames) {
- if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
- break;
+ BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- skb = skb_dequeue(&chan->tx_q);
- kfree_skb(skb);
+ if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
+ pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
+ BT_DBG("Sending on AMP connection %p %p",
+ pi->ampcon, pi->ampchan);
+ if (pi->ampchan)
+ hci_send_acl(pi->ampcon, pi->ampchan, skb,
+ ACL_COMPLETE);
+ else
+ kfree_skb(skb);
+ } else {
+ u16 flags;
- chan->unacked_frames--;
- }
+ bt_cb(skb)->force_active = pi->force_active;
+ BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
- if (!chan->unacked_frames)
- __clear_retrans_timer(chan);
-}
+ if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
+ !l2cap_pi(sk)->flushable)
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
-static void l2cap_streaming_send(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
- u32 control;
- u16 fcs;
-
- while ((skb = skb_dequeue(&chan->tx_q))) {
- control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
- control |= __set_txseq(chan, chan->next_tx_seq);
- __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data,
- skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs,
- skb->data + skb->len - L2CAP_FCS_SIZE);
- }
-
- l2cap_do_send(chan, skb);
-
- chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ hci_send_acl(pi->conn->hcon, NULL, skb, flags);
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
+int l2cap_ertm_send(struct sock *sk)
{
struct sk_buff *skb, *tx_skb;
- u16 fcs;
- u32 control;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct bt_l2cap_control *control;
+ int sent = 0;
- skb = skb_peek(&chan->tx_q);
- if (!skb)
- return;
+ BT_DBG("sk %p", sk);
- while (bt_cb(skb)->tx_seq != tx_seq) {
- if (skb_queue_is_last(&chan->tx_q, skb))
- return;
-
- skb = skb_queue_next(&chan->tx_q, skb);
- }
-
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- return;
- }
-
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
-
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
- control &= __get_sar_mask(chan);
-
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
-
- control |= __set_reqseq(chan, chan->buffer_seq);
- control |= __set_txseq(chan, tx_seq);
-
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data,
- tx_skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs,
- tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
- }
-
- l2cap_do_send(chan, tx_skb);
-}
-
-static int l2cap_ertm_send(struct l2cap_chan *chan)
-{
- struct sk_buff *skb, *tx_skb;
- u16 fcs;
- u32 control;
- int nsent = 0;
-
- if (chan->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
- while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ return 0;
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- break;
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ return 0;
+
+ while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
+ atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
+ (pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {
+
+ skb = sk->sk_send_head;
+
+ bt_cb(skb)->retries = 1;
+ control = &bt_cb(skb)->control;
+
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control->final = 1;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+ control->reqseq = pi->buffer_seq;
+ pi->last_acked_seq = pi->buffer_seq;
+ control->txseq = pi->next_tx_seq;
+
+ if (pi->extended_control) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
}
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ apply_fcs(skb);
+
+ /* Clone after data has been modified. Data is assumed to be
+ read-only (for locking purposes) on cloned sk_buffs.
+ */
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ if (!tx_skb)
+ break;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
- control &= __get_sar_mask(chan);
+ sock_hold(sk);
+ tx_skb->sk = sk;
+ tx_skb->destructor = l2cap_skb_destructor;
+ atomic_inc(&pi->ertm_queued);
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
+ l2cap_ertm_start_retrans_timer(pi);
- control |= __set_reqseq(chan, chan->buffer_seq);
- control |= __set_txseq(chan, chan->next_tx_seq);
+ pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
+ pi->unacked_frames += 1;
+ pi->frames_sent += 1;
+ sent += 1;
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data,
- tx_skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs, skb->data +
- tx_skb->len - L2CAP_FCS_SIZE);
- }
-
- l2cap_do_send(chan, tx_skb);
-
- __set_retrans_timer(chan);
-
- bt_cb(skb)->tx_seq = chan->next_tx_seq;
-
- chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
-
- if (bt_cb(skb)->retries == 1) {
- chan->unacked_frames++;
-
- if (!nsent++)
- __clear_ack_timer(chan);
- }
-
- chan->frames_sent++;
-
- if (skb_queue_is_last(&chan->tx_q, skb))
- chan->tx_send_head = NULL;
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ sk->sk_send_head = NULL;
else
- chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+ sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+
+ l2cap_do_send(sk, tx_skb);
+ BT_DBG("Sent txseq %d", (int)control->txseq);
}
- return nsent;
+ BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
+ (int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
+ atomic_read(&pi->ertm_queued));
+
+ return sent;
}
-static int l2cap_retransmit_frames(struct l2cap_chan *chan)
+int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
- int ret;
+ struct sk_buff *skb;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct bt_l2cap_control *control;
+ int sent = 0;
- if (!skb_queue_empty(&chan->tx_q))
- chan->tx_send_head = chan->tx_q.next;
+ BT_DBG("sk %p, skbs %p", sk, skbs);
- chan->next_tx_seq = chan->expected_ack_seq;
- ret = l2cap_ertm_send(chan);
- return ret;
-}
+ if (sk->sk_state != BT_CONNECTED)
+ return -ENOTCONN;
-static void __l2cap_send_ack(struct l2cap_chan *chan)
-{
- u32 control = 0;
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ return 0;
- control |= __set_reqseq(chan, chan->buffer_seq);
+ skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- l2cap_send_sframe(chan, control);
- return;
+ BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
+ while (!skb_queue_empty(TX_QUEUE(sk))) {
+
+ skb = skb_dequeue(TX_QUEUE(sk));
+
+ BT_DBG("skb %p", skb);
+
+ bt_cb(skb)->retries = 1;
+ control = &bt_cb(skb)->control;
+
+ BT_DBG("control %p", control);
+
+ control->reqseq = 0;
+ control->txseq = pi->next_tx_seq;
+
+ if (pi->extended_control) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ apply_fcs(skb);
+
+ l2cap_do_send(sk, skb);
+
+ BT_DBG("Sent txseq %d", (int)control->txseq);
+
+ pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
+ pi->frames_sent += 1;
+ sent += 1;
}
- if (l2cap_ertm_send(chan) > 0)
- return;
+ BT_DBG("Sent %d", sent);
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
+ return 0;
}
-static void l2cap_send_ack(struct l2cap_chan *chan)
+static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
{
- __clear_ack_timer(chan);
- __l2cap_send_ack(chan);
+ while (len > 0) {
+ if (iv->iov_len) {
+ int copy = min_t(unsigned int, len, iv->iov_len);
+ memcpy(kdata, iv->iov_base, copy);
+ len -= copy;
+ kdata += copy;
+ iv->iov_base += copy;
+ iv->iov_len -= copy;
+ }
+ iv++;
+ }
+
+ return 0;
}
-static void l2cap_send_srejtail(struct l2cap_chan *chan)
+static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
+ int len, int count, struct sk_buff *skb,
+ int reseg)
{
- struct srej_list *tail;
- u32 control;
-
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_ctrl_final(chan);
-
- tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= __set_reqseq(chan, tail->tx_seq);
-
- l2cap_send_sframe(chan, control);
-}
-
-static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
- struct msghdr *msg, int len,
- int count, struct sk_buff *skb)
-{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff **frag;
+ struct sk_buff *final;
int err, sent = 0;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
+ BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
+ msg, (int)len, (int)count, skb);
+
+ if (!conn)
+ return -ENOTCONN;
+
+ /* When resegmenting, data is copied from kernel space */
+ if (reseg) {
+ err = memcpy_fromkvec(skb_put(skb, count),
+ (struct kvec *) msg->msg_iov, count);
+ } else {
+ err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
+ count);
+ }
+
+ if (err)
return -EFAULT;
sent += count;
len -= count;
+ final = skb;
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
+ int skblen;
count = min_t(unsigned int, conn->mtu, len);
- *frag = chan->ops->alloc_skb(chan, count,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
+ /* Add room for the FCS if it fits */
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
+ len + L2CAP_FCS_SIZE <= conn->mtu)
+ skblen = count + L2CAP_FCS_SIZE;
+ else
+ skblen = count;
+
+ /* Don't use bt_skb_send_alloc() while resegmenting, since
+ * it is not ok to block.
+ */
+ if (reseg) {
+ *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
+ if (*frag)
+ skb_set_owner_w(*frag, sk);
+ } else {
+ *frag = bt_skb_send_alloc(sk, skblen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ }
if (!*frag)
- return err;
- if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
- (*frag)->priority = skb->priority;
+ /* When resegmenting, data is copied from kernel space */
+ if (reseg) {
+ err = memcpy_fromkvec(skb_put(*frag, count),
+ (struct kvec *) msg->msg_iov,
+ count);
+ } else {
+ err = memcpy_fromiovec(skb_put(*frag, count),
+ msg->msg_iov, count);
+ }
+
+ if (err)
+ return -EFAULT;
sent += count;
len -= count;
+ final = *frag;
+
frag = &(*frag)->next;
}
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
+ if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
+ if (reseg) {
+ *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
+ GFP_ATOMIC);
+ if (*frag)
+ skb_set_owner_w(*frag, sk);
+ } else {
+ *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
+ msg->msg_flags & MSG_DONTWAIT,
+ &err);
+ }
+
+ if (!*frag)
+ return -EFAULT;
+
+ final = *frag;
+ }
+
+ skb_put(final, L2CAP_FCS_SIZE);
+ }
+
return sent;
}
-static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 priority)
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
+ int err, count, hlen = L2CAP_HDR_SIZE + 2;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
+ BT_DBG("sk %p len %d", sk, (int)len);
count = min_t(unsigned int, (conn->mtu - hlen), len);
-
- skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
+ skb = bt_skb_send_alloc(sk, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return ERR_PTR(err);
- skb->priority = priority;
-
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(chan->psm, skb_put(skb, 2));
+ put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
- err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1622,33 +1725,27 @@
return skb;
}
-static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 priority)
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d", chan, (int)len);
+ BT_DBG("sk %p len %d", sk, (int)len);
count = min_t(unsigned int, (conn->mtu - hlen), len);
-
- skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
+ skb = bt_skb_send_alloc(sk, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return ERR_PTR(err);
- skb->priority = priority;
-
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1656,200 +1753,1032 @@
return skb;
}
-static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 control, u16 sdulen)
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
+ struct msghdr *msg, size_t len,
+ u16 sdulen, int reseg)
{
- struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen;
+ int reserve = 0;
struct l2cap_hdr *lh;
+ u8 fcs = l2cap_pi(sk)->fcs;
- BT_DBG("chan %p len %d", chan, (int)len);
-
- if (!conn)
- return ERR_PTR(-ENOTCONN);
-
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hlen = L2CAP_EXT_HDR_SIZE;
+ if (l2cap_pi(sk)->extended_control)
+ hlen = L2CAP_EXTENDED_HDR_SIZE;
else
- hlen = L2CAP_ENH_HDR_SIZE;
+ hlen = L2CAP_ENHANCED_HDR_SIZE;
if (sdulen)
hlen += L2CAP_SDULEN_SIZE;
- if (chan->fcs == L2CAP_FCS_CRC16)
+ if (fcs == L2CAP_FCS_CRC16)
hlen += L2CAP_FCS_SIZE;
- count = min_t(unsigned int, (conn->mtu - hlen), len);
+ BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
+ sk, msg, (int)len, (int)sdulen, hlen);
- skb = chan->ops->alloc_skb(chan, count + hlen,
+ count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
+
+ /* Allocate extra headroom for Qualcomm PAL. This is only
+ * necessary in two places (here and when creating sframes)
+ * because only unfragmented iframes and sframes are sent
+ * using AMP controllers.
+ */
+ if (l2cap_pi(sk)->ampcon &&
+ l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
+ reserve = BT_SKB_RESERVE_80211;
+
+ /* Don't use bt_skb_send_alloc() while resegmenting, since
+ * it is not ok to block.
+ */
+ if (reseg) {
+ skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
+ if (skb)
+ skb_set_owner_w(skb, sk);
+ } else {
+ skb = bt_skb_send_alloc(sk, count + hlen + reserve,
msg->msg_flags & MSG_DONTWAIT, &err);
-
+ }
if (!skb)
return ERR_PTR(err);
+ if (reserve)
+ skb_reserve(skb, reserve);
+
+ bt_cb(skb)->control.fcs = fcs;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ /* Control header is populated later */
+ if (l2cap_pi(sk)->extended_control)
+ put_unaligned_le32(0, skb_put(skb, 4));
+ else
+ put_unaligned_le16(0, skb_put(skb, 2));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
- err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
if (unlikely(err < 0)) {
+ BT_DBG("err %d", err);
kfree_skb(skb);
return ERR_PTR(err);
}
- if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
-
bt_cb(skb)->retries = 0;
return skb;
}
-static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
{
- struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u32 control;
- size_t size = 0;
+ struct l2cap_pinfo *pi;
+ struct sk_buff *acked_skb;
+ u16 ackseq;
- skb_queue_head_init(&sar_queue);
- control = __set_ctrl_sar(chan, L2CAP_SAR_START);
- skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
- __skb_queue_tail(&sar_queue, skb);
- len -= chan->remote_mps;
- size += chan->remote_mps;
+ pi = l2cap_pi(sk);
- while (len > 0) {
- size_t buflen;
+ if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
+ return;
- if (len > chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
- buflen = chan->remote_mps;
- } else {
- control = __set_ctrl_sar(chan, L2CAP_SAR_END);
- buflen = len;
+ BT_DBG("expected_ack_seq %d, unacked_frames %d",
+ (int) pi->expected_ack_seq, (int) pi->unacked_frames);
+
+ for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
+ ackseq = __next_seq(ackseq, pi)) {
+
+ acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
+ if (acked_skb) {
+ skb_unlink(acked_skb, TX_QUEUE(sk));
+ kfree_skb(acked_skb);
+ pi->unacked_frames--;
}
-
- skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
- if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
- return PTR_ERR(skb);
- }
-
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
}
- skb_queue_splice_tail(&sar_queue, &chan->tx_q);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = sar_queue.next;
- return size;
+ pi->expected_ack_seq = reqseq;
+
+ if (pi->unacked_frames == 0)
+ l2cap_ertm_stop_retrans_timer(pi);
+
+ BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
}
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
- u32 priority)
+static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
{
struct sk_buff *skb;
- u32 control;
- int err;
+ int len;
+ int reserve = 0;
+ struct l2cap_hdr *lh;
- /* Connectionless channel */
- if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
- skb = l2cap_create_connless_pdu(chan, msg, len, priority);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (l2cap_pi(sk)->extended_control)
+ len = L2CAP_EXTENDED_HDR_SIZE;
+ else
+ len = L2CAP_ENHANCED_HDR_SIZE;
- l2cap_do_send(chan, skb);
- return len;
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+ len += L2CAP_FCS_SIZE;
+
+ /* Allocate extra headroom for Qualcomm PAL */
+ if (l2cap_pi(sk)->ampcon &&
+ l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
+ reserve = BT_SKB_RESERVE_80211;
+
+ skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
+
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (reserve)
+ skb_reserve(skb, reserve);
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
+
+ if (l2cap_pi(sk)->extended_control)
+ put_unaligned_le32(control, skb_put(skb, 4));
+ else
+ put_unaligned_le16(control, skb_put(skb, 2));
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
- switch (chan->mode) {
- case L2CAP_MODE_BASIC:
- /* Check outgoing MTU */
- if (len > chan->omtu)
- return -EMSGSIZE;
+ return skb;
+}
- /* Create a basic PDU */
- skb = l2cap_create_basic_pdu(chan, msg, len, priority);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+static void l2cap_ertm_send_sframe(struct sock *sk,
+ struct bt_l2cap_control *control)
+{
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+ u32 control_field;
- l2cap_do_send(chan, skb);
- err = len;
- break;
+ BT_DBG("sk %p, control %p", sk, control);
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
- skb = l2cap_create_iframe_pdu(chan, msg, len, control,
- 0);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (control->frame_type != 's')
+ return;
- __skb_queue_tail(&chan->tx_q, skb);
+ pi = l2cap_pi(sk);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = skb;
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
+ BT_DBG("AMP error - attempted S-Frame send during AMP move");
+ return;
+ }
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(chan, msg, len);
- if (err < 0)
- return err;
+ if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
+ control->final = 1;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+
+ if (control->super == L2CAP_SFRAME_RR)
+ pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
+ else if (control->super == L2CAP_SFRAME_RNR)
+ pi->conn_state |= L2CAP_CONN_SENT_RNR;
+
+ if (control->super != L2CAP_SFRAME_SREJ) {
+ pi->last_acked_seq = control->reqseq;
+ l2cap_ertm_stop_ack_timer(pi);
+ }
+
+ BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
+ (int) control->final, (int) control->poll,
+ (int) control->super);
+
+ if (pi->extended_control)
+ control_field = __pack_extended_control(control);
+ else
+ control_field = __pack_enhanced_control(control);
+
+ skb = l2cap_create_sframe_pdu(sk, control_field);
+ if (!IS_ERR(skb))
+ l2cap_do_send(sk, skb);
+}
+
+static void l2cap_ertm_send_ack(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct bt_l2cap_control control;
+ u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
+ int threshold;
+
+ BT_DBG("sk %p", sk);
+ BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
+ (int)pi->buffer_seq);
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+
+ if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
+ l2cap_ertm_stop_ack_timer(pi);
+ control.super = L2CAP_SFRAME_RNR;
+ control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ } else {
+ if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+ l2cap_ertm_send(sk);
+ /* If any i-frames were sent, they included an ack */
+ if (pi->buffer_seq == pi->last_acked_seq)
+ frames_to_ack = 0;
}
- if (chan->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(chan);
- err = len;
+ /* Ack now if the tx window is 3/4ths full.
+ * Calculate without mul or div
+ */
+ threshold = pi->tx_win;
+ threshold += threshold << 1;
+ threshold >>= 2;
+
+ BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
+ threshold);
+
+ if (frames_to_ack >= threshold) {
+ l2cap_ertm_stop_ack_timer(pi);
+ control.super = L2CAP_SFRAME_RR;
+ control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ frames_to_ack = 0;
+ }
+
+ if (frames_to_ack)
+ l2cap_ertm_start_ack_timer(pi);
+ }
+}
+
+static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control control;
+
+ BT_DBG("sk %p, poll %d", sk, (int) poll);
+
+ pi = l2cap_pi(sk);
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.poll = poll;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ control.super = L2CAP_SFRAME_RNR;
+ else
+ control.super = L2CAP_SFRAME_RR;
+
+ control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &control);
+}
+
+static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control control;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.final = 1;
+ control.reqseq = pi->buffer_seq;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ control.super = L2CAP_SFRAME_RNR;
+ l2cap_ertm_send_sframe(sk, &control);
+ }
+
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
+ l2cap_ertm_start_retrans_timer(pi);
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ /* Send pending iframes */
+ l2cap_ertm_send(sk);
+
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ /* F-bit wasn't sent in an s-frame or i-frame yet, so
+ * send it now.
+ */
+ control.super = L2CAP_SFRAME_RR;
+ l2cap_ertm_send_sframe(sk, &control);
+ }
+}
+
+static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+ u16 seq;
+
+ BT_DBG("sk %p, txseq %d", sk, (int)txseq);
+
+ pi = l2cap_pi(sk);
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.super = L2CAP_SFRAME_SREJ;
+
+ for (seq = pi->expected_tx_seq; seq != txseq;
+ seq = __next_seq(seq, pi)) {
+ if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
+ control.reqseq = seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ l2cap_seq_list_append(&pi->srej_list, seq);
+ }
+ }
+
+ pi->expected_tx_seq = __next_seq(txseq, pi);
+}
+
+static void l2cap_ertm_send_srej_tail(struct sock *sk)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.super = L2CAP_SFRAME_SREJ;
+ control.reqseq = pi->srej_list.tail;
+ l2cap_ertm_send_sframe(sk, &control);
+}
+
+static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+ u16 initial_head;
+ u16 seq;
+
+ BT_DBG("sk %p, txseq %d", sk, (int) txseq);
+
+ pi = l2cap_pi(sk);
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.super = L2CAP_SFRAME_SREJ;
+
+ /* Capture initial list head to allow only one pass through the list. */
+ initial_head = pi->srej_list.head;
+
+ do {
+ seq = l2cap_seq_list_pop(&pi->srej_list);
+ if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
+ break;
+
+ control.reqseq = seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ l2cap_seq_list_append(&pi->srej_list, seq);
+ } while (pi->srej_list.head != initial_head);
+}
+
+static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ BT_DBG("sk %p", sk);
+
+ pi->expected_tx_seq = pi->buffer_seq;
+ l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
+ skb_queue_purge(SREJ_QUEUE(sk));
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+}
+
+static int l2cap_ertm_tx_state_xmit(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_DATA_REQUEST:
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb_peek(skbs);
+
+ skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
+ l2cap_ertm_send(sk);
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_ertm_abort_rx_srej_sent(sk);
+ }
+
+ l2cap_ertm_send_ack(sk);
+
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(pi->conn, pi,
+ pi->scid,
+ L2CAP_MOVE_CHAN_CONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ } else if (pi->amp_move_role ==
+ L2CAP_AMP_MOVE_RESPONDER) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident,
+ pi->dcid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ }
break;
}
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- err = len;
- break;
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
+ (pi->conn_state & L2CAP_CONN_SENT_RNR)) {
+ struct bt_l2cap_control local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.frame_type = 's';
+ local_control.super = L2CAP_SFRAME_RR;
+ local_control.poll = 1;
+ local_control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &local_control);
+
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
}
-
- err = l2cap_ertm_send(chan);
- if (err >= 0)
- err = len;
-
break;
-
+ case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+ break;
+ case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
+ l2cap_ertm_send_rr_or_rnr(sk, 1);
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ l2cap_ertm_stop_ack_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
+ l2cap_ertm_send_rr_or_rnr(sk, 1);
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_ERTM_EVENT_RECV_FBIT:
+ /* Nothing to process */
+ break;
default:
- BT_DBG("bad state %1.1x", chan->mode);
- err = -EBADFD;
+ break;
}
return err;
}
+static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_DATA_REQUEST:
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb_peek(skbs);
+ /* Queue data, but don't send. */
+ skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_ertm_abort_rx_srej_sent(sk);
+ }
+
+ l2cap_ertm_send_ack(sk);
+
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
+ struct bt_l2cap_control local_control;
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.frame_type = 's';
+ local_control.super = L2CAP_SFRAME_RR;
+ local_control.poll = 1;
+ local_control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &local_control);
+
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+
+ /* Fall through */
+
+ case L2CAP_ERTM_EVENT_RECV_FBIT:
+ if (control && control->final) {
+ l2cap_ertm_stop_monitor_timer(pi);
+ if (pi->unacked_frames > 0)
+ l2cap_ertm_start_retrans_timer(pi);
+ pi->retry_count = 0;
+ pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
+ }
+ break;
+ case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
+ if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
+ l2cap_ertm_send_rr_or_rnr(sk, 1);
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->retry_count += 1;
+ } else
+ l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
+ sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
+
+ pi = l2cap_pi(sk);
+
+ switch (pi->tx_state) {
+ case L2CAP_ERTM_TX_STATE_XMIT:
+ err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
+ break;
+ case L2CAP_ERTM_TX_STATE_WAIT_F:
+ err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+
+ return err;
+}
+
+int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
+ struct msghdr *msg, size_t len, int reseg)
+{
+ struct sk_buff *skb;
+ u16 sdu_len;
+ size_t pdu_len;
+ int err = 0;
+ u8 sar;
+
+ BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);
+
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
+
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = l2cap_pi(sk)->conn->mtu;
+
+ /* Constrain BR/EDR PDU size to fit within the largest radio packet */
+ if (!l2cap_pi(sk)->ampcon)
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len) {
+ skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);
+
+ BT_DBG("iframe skb %p", skb);
+
+ if (IS_ERR(skb)) {
+ __skb_queue_purge(seg_queue);
+ return PTR_ERR(skb);
+ }
+
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
+ }
+
+ return err;
+}
+
+static inline int is_initial_frame(u8 sar)
+{
+ return (sar == L2CAP_SAR_UNSEGMENTED ||
+ sar == L2CAP_SAR_START);
+}
+
+static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
+ size_t veclen)
+{
+ struct sk_buff *frag_iter;
+
+ BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
+
+ if (iv->iov_len + skb->len > veclen)
+ return -ENOMEM;
+
+ memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
+ iv->iov_len += skb->len;
+
+ skb_walk_frags(skb, frag_iter) {
+ if (iv->iov_len + skb->len > veclen)
+ return -ENOMEM;
+
+ BT_DBG("Copying %d bytes", (int)frag_iter->len);
+ memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
+ frag_iter->len);
+ iv->iov_len += frag_iter->len;
+ }
+
+ return 0;
+}
+
+int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
+{
+ void *buf;
+ int buflen;
+ int err = 0;
+ struct sk_buff *skb;
+ struct msghdr msg;
+ struct kvec iv;
+ struct sk_buff_head old_frames;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p", sk);
+
+ if (skb_queue_empty(queue))
+ return 0;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = (struct iovec *) &iv;
+
+ buflen = pi->omtu + L2CAP_FCS_SIZE;
+ buf = kzalloc(buflen, GFP_TEMPORARY);
+
+ if (!buf) {
+ BT_DBG("Could not allocate resegmentation buffer");
+ return -ENOMEM;
+ }
+
+ /* Move current frames off the original queue */
+ __skb_queue_head_init(&old_frames);
+ skb_queue_splice_tail_init(queue, &old_frames);
+
+ while (!skb_queue_empty(&old_frames)) {
+ struct sk_buff_head current_sdu;
+ u8 original_sar;
+
+ /* Reassemble each SDU from one or more PDUs */
+
+ iv.iov_base = buf;
+ iv.iov_len = 0;
+
+ skb = skb_peek(&old_frames);
+ original_sar = bt_cb(skb)->control.sar;
+
+ __skb_unlink(skb, &old_frames);
+
+ /* Append data to SDU */
+ if (pi->extended_control)
+ skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
+ else
+ skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);
+
+ if (original_sar == L2CAP_SAR_START)
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ err = l2cap_skbuff_to_kvec(skb, &iv, buflen);
+
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
+ iv.iov_len -= L2CAP_FCS_SIZE;
+
+ /* Free skb */
+ kfree_skb(skb);
+
+ if (err)
+ break;
+
+ while (!skb_queue_empty(&old_frames) && !err) {
+ /* Check next frame */
+ skb = skb_peek(&old_frames);
+
+ if (is_initial_frame(bt_cb(skb)->control.sar))
+ break;
+
+ __skb_unlink(skb, &old_frames);
+
+ /* Append data to SDU */
+ if (pi->extended_control)
+ skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
+ else
+ skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);
+
+ if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ err = l2cap_skbuff_to_kvec(skb, &iv, buflen);
+
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
+ iv.iov_len -= L2CAP_FCS_SIZE;
+
+ /* Free skb */
+ kfree_skb(skb);
+ }
+
+ if (err)
+ break;
+
+ /* Segment data */
+
+ __skb_queue_head_init(¤t_sdu);
+
+ /* skbs for the SDU were just freed, but the
+ * resegmenting process could produce more, smaller
+ * skbs due to smaller PDUs and reduced HCI MTU. The
+ * overhead from the sk_buff structs could put us over
+ * the sk_sndbuf limit.
+ *
+ * Since this code is running in response to a
+ * received poll/final packet, it cannot block.
+ * Therefore, memory allocation needs to be allowed by
+ * falling back to bt_skb_alloc() (with
+ * skb_set_owner_w() to maintain sk_wmem_alloc
+ * correctly).
+ */
+ msg.msg_iovlen = iv.iov_len;
+ err = l2cap_segment_sdu(sk, ¤t_sdu, &msg,
+ msg.msg_iovlen, 1);
+
+ if (err || skb_queue_empty(¤t_sdu)) {
+ BT_DBG("Error %d resegmenting data for socket %p",
+ err, sk);
+ __skb_queue_purge(¤t_sdu);
+ break;
+ }
+
+ /* Fix up first PDU SAR bits */
+ if (!is_initial_frame(original_sar)) {
+ BT_DBG("Changing SAR bits, %d PDUs",
+ skb_queue_len(¤t_sdu));
+ skb = skb_peek(¤t_sdu);
+
+ if (skb_queue_len(¤t_sdu) == 1) {
+ /* Change SAR from 'unsegmented' to 'end' */
+ bt_cb(skb)->control.sar = L2CAP_SAR_END;
+ } else {
+ struct l2cap_hdr *lh;
+ size_t hdrlen;
+
+ /* Change SAR from 'start' to 'continue' */
+ bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;
+
+ /* Start frames contain 2 bytes for
+ * sdulen and continue frames don't.
+ * Must rewrite header to eliminate
+ * sdulen and then adjust l2cap frame
+ * length.
+ */
+ if (pi->extended_control)
+ hdrlen = L2CAP_EXTENDED_HDR_SIZE;
+ else
+ hdrlen = L2CAP_ENHANCED_HDR_SIZE;
+
+ memmove(skb->data + L2CAP_SDULEN_SIZE,
+ skb->data, hdrlen);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+ lh = (struct l2cap_hdr *)skb->data;
+ lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
+ L2CAP_SDULEN_SIZE);
+ }
+ }
+
+ /* Add to queue */
+ skb_queue_splice_tail(¤t_sdu, queue);
+ }
+
+ __skb_queue_purge(&old_frames);
+ if (err)
+ __skb_queue_purge(queue);
+
+ kfree(buf);
+
+ BT_DBG("Queue resegmented, err=%d", err);
+ return err;
+}
+
+static void l2cap_resegment_worker(struct work_struct *work)
+{
+ int err = 0;
+ struct l2cap_resegment_work *seg_work =
+ container_of(work, struct l2cap_resegment_work, work);
+ struct sock *sk = seg_work->sk;
+
+ kfree(seg_work);
+
+ BT_DBG("sk %p", sk);
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
+ release_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
+ err = l2cap_resegment_queue(sk, TX_QUEUE(sk));
+
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
+
+ if (skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head = NULL;
+ else
+ sk->sk_send_head = skb_peek(TX_QUEUE(sk));
+
+ if (err)
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
+ else
+ l2cap_ertm_send(sk);
+
+ release_sock(sk);
+ sock_put(sk);
+}
+
+static int l2cap_setup_resegment(struct sock *sk)
+{
+ struct l2cap_resegment_work *seg_work;
+
+ BT_DBG("sk %p", sk);
+
+ if (skb_queue_empty(TX_QUEUE(sk)))
+ return 0;
+
+ seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
+ if (!seg_work)
+ return -ENOMEM;
+
+ INIT_WORK(&seg_work->work, l2cap_resegment_worker);
+ sock_hold(sk);
+ seg_work->sk = sk;
+
+ if (!queue_work(_l2cap_wq, &seg_work->work)) {
+ kfree(seg_work);
+ sock_put(sk);
+ return -ENOMEM;
+ }
+
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
+
+ return 0;
+}
+
+static inline int l2cap_rmem_available(struct sock *sk)
+{
+ BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
+ atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
+ return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
+}
+
+static inline int l2cap_rmem_full(struct sock *sk)
+{
+ BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
+ atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
+ return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
+}
+
+void l2cap_amp_move_init(struct sock *sk)
+{
+ BT_DBG("sk %p", sk);
+
+ if (!l2cap_pi(sk)->conn)
+ return;
+
+ if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
+ return;
+
+ if (l2cap_pi(sk)->amp_id == 0) {
+ if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
+ return;
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
+ amp_create_physical(l2cap_pi(sk)->conn, sk);
+ } else {
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
+ l2cap_pi(sk)->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
+ l2cap_pi(sk)->amp_move_id = 0;
+ l2cap_amp_move_setup(sk);
+ l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
+ l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+}
+
+static void l2cap_chan_ready(struct sock *sk)
+{
+ struct sock *parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ l2cap_pi(sk)->conf_state = 0;
+ l2cap_sock_clear_timer(sk);
+
+ if (!parent) {
+ /* Outgoing channel.
+ * Wake up socket sleeping on connect.
+ */
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else {
+ /* Incoming channel.
+ * Wake up socket sleeping on accept.
+ */
+ parent->sk_data_ready(parent, 0);
+ }
+}
+
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;
- struct l2cap_chan *chan;
+ struct sock *sk;
BT_DBG("conn %p", conn);
- mutex_lock(&conn->chan_lock);
-
- list_for_each_entry(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
- if (chan->chan_type != L2CAP_CHAN_RAW)
+ read_lock(&l->lock);
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ if (sk->sk_type != SOCK_RAW)
continue;
/* Don't send frame to the socket it came from */
@@ -1859,11 +2788,10 @@
if (!nskb)
continue;
- if (chan->ops->recv(chan->data, nskb))
+ if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
-
- mutex_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
}
/* ---- L2CAP signalling commands ---- */
@@ -1874,12 +2802,13 @@
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
int len, count;
+ unsigned int mtu = conn->hcon->hdev->acl_mtu;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
- count = min_t(unsigned int, conn->mtu, len);
+ count = min_t(unsigned int, mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
@@ -1909,7 +2838,7 @@
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
- count = min_t(unsigned int, conn->mtu, len);
+ count = min_t(unsigned int, mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
if (!*frag)
@@ -1993,68 +2922,158 @@
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_ertm_ack_timeout(struct work_struct *work)
{
- struct l2cap_conf_efs efs;
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct l2cap_pinfo *pi =
+ container_of(delayed, struct l2cap_pinfo, ack_work);
+ struct sock *sk = (struct sock *)pi;
+ u16 frames_to_ack;
- switch (chan->mode) {
- case L2CAP_MODE_ERTM:
- efs.id = chan->local_id;
- efs.stype = chan->local_stype;
- efs.msdu = cpu_to_le16(chan->local_msdu);
- efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
- efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
- break;
+ BT_DBG("sk %p", sk);
- case L2CAP_MODE_STREAMING:
- efs.id = 1;
- efs.stype = L2CAP_SERV_BESTEFFORT;
- efs.msdu = cpu_to_le16(chan->local_msdu);
- efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- efs.acc_lat = 0;
- efs.flush_to = 0;
- break;
+ if (!sk)
+ return;
- default:
+ lock_sock(sk);
+
+ if (!l2cap_pi(sk)->conn) {
+ release_sock(sk);
return;
}
- l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
+ l2cap_pi(sk)->last_acked_seq,
+ l2cap_pi(sk));
+
+ if (frames_to_ack)
+ l2cap_ertm_send_rr_or_rnr(sk, 0);
+
+ release_sock(sk);
}
-static void l2cap_ack_timeout(struct work_struct *work)
+static void l2cap_ertm_retrans_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- ack_timer.work);
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct l2cap_pinfo *pi =
+ container_of(delayed, struct l2cap_pinfo, retrans_work);
+ struct sock *sk = (struct sock *)pi;
- BT_DBG("chan %p", chan);
+ BT_DBG("sk %p", sk);
- l2cap_chan_lock(chan);
+ if (!sk)
+ return;
- __l2cap_send_ack(chan);
+ lock_sock(sk);
- l2cap_chan_unlock(chan);
+ if (!l2cap_pi(sk)->conn) {
+ release_sock(sk);
+ return;
+ }
- l2cap_chan_put(chan);
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
+ release_sock(sk);
}
-static inline void l2cap_ertm_init(struct l2cap_chan *chan)
+static void l2cap_ertm_monitor_timeout(struct work_struct *work)
{
- chan->expected_ack_seq = 0;
- chan->unacked_frames = 0;
- chan->buffer_seq = 0;
- chan->num_acked = 0;
- chan->frames_sent = 0;
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct l2cap_pinfo *pi =
+ container_of(delayed, struct l2cap_pinfo, monitor_work);
+ struct sock *sk = (struct sock *)pi;
- INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
- INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
- INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
+ BT_DBG("sk %p", sk);
- skb_queue_head_init(&chan->srej_q);
+ if (!sk)
+ return;
- INIT_LIST_HEAD(&chan->srej_l);
+ lock_sock(sk);
+
+ if (!l2cap_pi(sk)->conn) {
+ release_sock(sk);
+ return;
+ }
+
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
+
+ release_sock(sk);
+}
+
+static inline void l2cap_ertm_init(struct sock *sk)
+{
+ l2cap_pi(sk)->next_tx_seq = 0;
+ l2cap_pi(sk)->expected_tx_seq = 0;
+ l2cap_pi(sk)->expected_ack_seq = 0;
+ l2cap_pi(sk)->unacked_frames = 0;
+ l2cap_pi(sk)->buffer_seq = 0;
+ l2cap_pi(sk)->frames_sent = 0;
+ l2cap_pi(sk)->last_acked_seq = 0;
+ l2cap_pi(sk)->sdu = NULL;
+ l2cap_pi(sk)->sdu_last_frag = NULL;
+ l2cap_pi(sk)->sdu_len = 0;
+ atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
+
+ l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
+
+ BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
+ l2cap_pi(sk)->rx_state);
+
+ l2cap_pi(sk)->amp_id = 0;
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ l2cap_pi(sk)->amp_move_reqseq = 0;
+ l2cap_pi(sk)->amp_move_event = 0;
+
+ INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
+ INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
+ l2cap_ertm_retrans_timeout);
+ INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
+ l2cap_ertm_monitor_timeout);
+ INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
+ skb_queue_head_init(SREJ_QUEUE(sk));
+ skb_queue_head_init(TX_QUEUE(sk));
+
+ l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
+ l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
+ l2cap_pi(sk)->remote_tx_win);
+}
+
+void l2cap_ertm_destruct(struct sock *sk)
+{
+ l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
+ l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
+}
+
+void l2cap_ertm_shutdown(struct sock *sk)
+{
+ l2cap_ertm_stop_ack_timer(l2cap_pi(sk));
+ l2cap_ertm_stop_retrans_timer(l2cap_pi(sk));
+ l2cap_ertm_stop_monitor_timer(l2cap_pi(sk));
+}
+
+void l2cap_ertm_recv_done(struct sock *sk)
+{
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM ||
+ sk->sk_state != BT_CONNECTED) {
+ release_sock(sk);
+ return;
+ }
+
+ /* Consume any queued incoming frames and update local busy status */
+ if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
+ l2cap_ertm_rx_queued_iframes(sk))
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
+ else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ l2cap_rmem_available(sk))
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
+
+ release_sock(sk);
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2070,68 +3089,231 @@
}
}
-static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
{
- return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
-}
-
-static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
-{
- return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
-}
-
-static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
-{
- if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
- __l2cap_ews_supported(chan)) {
- /* use extended control field */
- set_bit(FLAG_EXT_CTRL, &chan->flags);
- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
+ (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
+ pi->extended_control = 1;
} else {
- chan->tx_win = min_t(u16, chan->tx_win,
- L2CAP_DEFAULT_TX_WINDOW);
- chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
+ pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
+
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
+ pi->extended_control = 0;
}
}
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
+ struct hci_ext_fs *new,
+ struct hci_ext_fs *agg)
{
- struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = chan->mode };
- void *ptr = req->data;
- u16 size;
+ *agg = *cur;
+ if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
+ /* current flow spec has known rate */
+ if ((new->max_sdu == 0xFFFF) ||
+ (new->sdu_arr_time == 0xFFFFFFFF)) {
+ /* new fs has unknown rate, so aggregate is unknown */
+ agg->max_sdu = 0xFFFF;
+ agg->sdu_arr_time = 0xFFFFFFFF;
+ } else {
+ /* new fs has known rate, so aggregate is known */
+ u64 cur_rate;
+ u64 new_rate;
+ cur_rate = cur->max_sdu * 1000000ULL;
+ if (cur->sdu_arr_time)
+ cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
+ new_rate = new->max_sdu * 1000000ULL;
+ if (new->sdu_arr_time)
+ new_rate = div_u64(new_rate, new->sdu_arr_time);
+ cur_rate = cur_rate + new_rate;
+ if (cur_rate)
+ agg->sdu_arr_time = div64_u64(
+ agg->max_sdu * 1000000ULL, cur_rate);
+ }
+ }
+}
+
+static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
+{
+ struct hci_ext_fs tx_fs;
+ struct hci_ext_fs rx_fs;
BT_DBG("chan %p", chan);
- if (chan->num_conf_req || chan->num_conf_rsp)
+ if (((chan->tx_fs.max_sdu == 0xFFFF) ||
+ (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
+ ((chan->rx_fs.max_sdu == 0xFFFF) ||
+ (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
+ return 0;
+
+ l2cap_aggregate_fs(&chan->tx_fs,
+ (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
+ l2cap_aggregate_fs(&chan->rx_fs,
+ (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
+ hci_chan_modify(chan, &tx_fs, &rx_fs);
+ return 1;
+}
+
+static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
+ struct hci_ext_fs *old,
+ struct hci_ext_fs *agg)
+{
+ *agg = *cur;
+ if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
+ u64 cur_rate;
+ u64 old_rate;
+ cur_rate = cur->max_sdu * 1000000ULL;
+ if (cur->sdu_arr_time)
+ cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
+ old_rate = old->max_sdu * 1000000ULL;
+ if (old->sdu_arr_time)
+ old_rate = div_u64(old_rate, old->sdu_arr_time);
+ cur_rate = cur_rate - old_rate;
+ if (cur_rate)
+ agg->sdu_arr_time = div64_u64(
+ agg->max_sdu * 1000000ULL, cur_rate);
+ }
+}
+
+static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
+{
+ struct hci_ext_fs tx_fs;
+ struct hci_ext_fs rx_fs;
+
+ BT_DBG("chan %p", chan);
+
+ if (((chan->tx_fs.max_sdu == 0xFFFF) ||
+ (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
+ ((chan->rx_fs.max_sdu == 0xFFFF) ||
+ (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
+ return 0;
+
+ l2cap_deaggregate_fs(&chan->tx_fs,
+ (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
+ l2cap_deaggregate_fs(&chan->rx_fs,
+ (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
+ hci_chan_modify(chan, &tx_fs, &rx_fs);
+ return 1;
+}
+
+static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct hci_dev *hdev;
+ struct hci_conn *hcon;
+ struct hci_chan *chan;
+
+ hdev = hci_dev_get(amp_id);
+ if (!hdev)
+ return NULL;
+
+ BT_DBG("hdev %s", hdev->name);
+
+ hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
+ if (!hcon) {
+ chan = NULL;
+ goto done;
+ }
+
+ chan = hci_chan_list_lookup_id(hdev, hcon->handle);
+ if (chan) {
+ l2cap_aggregate(chan, pi);
+ sock_hold(sk);
+ chan->l2cap_sk = sk;
+ hci_chan_hold(chan);
+ pi->ampchan = chan;
+ goto done;
+ }
+
+ chan = hci_chan_add(hdev);
+ if (chan) {
+ chan->conn = hcon;
+ sock_hold(sk);
+ chan->l2cap_sk = sk;
+ hci_chan_hold(chan);
+ pi->ampchan = chan;
+ hci_chan_create(chan,
+ (struct hci_ext_fs *) &pi->local_fs,
+ (struct hci_ext_fs *) &pi->remote_fs);
+ }
+done:
+ hci_dev_put(hdev);
+ return chan;
+}
+
+static void l2cap_get_ertm_timeouts(struct l2cap_conf_rfc *rfc,
+ struct l2cap_pinfo *pi)
+{
+ if (pi->amp_id && pi->ampcon) {
+ u64 ertm_to = pi->ampcon->hdev->amp_be_flush_to;
+
+ /* Class 1 devices must have ERTM timeouts
+ * exceeding the Link Supervision Timeout. The
+ * default Link Supervision Timeout for AMP
+ * controllers is 10 seconds.
+ *
+ * Class 1 devices use 0xffffffff for their
+ * best-effort flush timeout, so the clamping logic
+ * will result in a timeout that meets the above
+ * requirement. ERTM timeouts are 16-bit values, so
+ * the maximum timeout is 65.535 seconds.
+ */
+
+ /* Convert timeout to milliseconds and round */
+ ertm_to = div_u64(ertm_to + 999, 1000);
+
+ /* This is the recommended formula for class 2 devices
+ * that start ERTM timers when packets are sent to the
+ * controller.
+ */
+ ertm_to = 3 * ertm_to + 500;
+
+ if (ertm_to > 0xffff)
+ ertm_to = 0xffff;
+
+ rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
+ rfc->monitor_timeout = rfc->retrans_timeout;
+ } else {
+ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ }
+}
+
+int l2cap_build_conf_req(struct sock *sk, void *data)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+ void *ptr = req->data;
+
+ BT_DBG("sk %p", sk);
+
+ if (pi->num_conf_req || pi->num_conf_rsp)
goto done;
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
- if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
+ if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
break;
- if (__l2cap_efs_supported(chan))
- set_bit(FLAG_EFS_ENABLE, &chan->flags);
-
/* fall through */
default:
- chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
+ pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
break;
}
done:
- if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ if (pi->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_BASIC:
- if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
- !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+ !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
break;
- rfc.mode = L2CAP_MODE_BASIC;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
@@ -2143,94 +3325,143 @@
break;
case L2CAP_MODE_ERTM:
- rfc.mode = L2CAP_MODE_ERTM;
- rfc.max_transmit = chan->max_tx;
- rfc.retrans_timeout = 0;
- rfc.monitor_timeout = 0;
+ l2cap_setup_txwin(pi);
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
+ rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
+ else
+ rfc.txwin_size = pi->tx_win;
+ rfc.max_transmit = pi->max_tx;
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ l2cap_get_ertm_timeouts(&rfc, pi);
- size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
- L2CAP_EXT_HDR_SIZE -
- L2CAP_SDULEN_SIZE -
- L2CAP_FCS_SIZE);
- rfc.max_pdu_size = cpu_to_le16(size);
-
- l2cap_txwin_setup(chan);
-
- rfc.txwin_size = min_t(u16, chan->tx_win,
- L2CAP_DEFAULT_TX_WINDOW);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
+ rfc.max_pdu_size = cpu_to_le16(pi->imtu);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
-
- if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
- break;
-
- if (chan->fcs == L2CAP_FCS_NONE ||
- test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
- chan->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
+ if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
+ pi->extended_control) {
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
+ pi->tx_win);
}
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ if (pi->amp_id) {
+ /* default best effort extended flow spec */
+ struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
+ sizeof(fs), (unsigned long) &fs);
+ }
+
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+ break;
+
+ if (pi->fcs == L2CAP_FCS_NONE ||
+ pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+ pi->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+ }
break;
case L2CAP_MODE_STREAMING:
- rfc.mode = L2CAP_MODE_STREAMING;
+ l2cap_setup_txwin(pi);
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
-
- size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
- L2CAP_EXT_HDR_SIZE -
- L2CAP_SDULEN_SIZE -
- L2CAP_FCS_SIZE);
- rfc.max_pdu_size = cpu_to_le16(size);
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
+ rfc.max_pdu_size = cpu_to_le16(pi->imtu);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
+ pi->extended_control) {
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
+ }
- if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
break;
- if (chan->fcs == L2CAP_FCS_NONE ||
- test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
- chan->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
+ if (pi->fcs == L2CAP_FCS_NONE ||
+ pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+ pi->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
}
break;
}
- req->dcid = cpu_to_le16(chan->dcid);
+ req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
return ptr - data;
}
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+
+static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+ void *ptr = req->data;
+
+ BT_DBG("sk %p", sk);
+
+ switch (pi->mode) {
+ case L2CAP_MODE_ERTM:
+ rfc.mode = L2CAP_MODE_ERTM;
+ rfc.txwin_size = pi->tx_win;
+ rfc.max_transmit = pi->max_tx;
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ l2cap_get_ertm_timeouts(&rfc, pi);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
+ rfc.max_pdu_size = cpu_to_le16(pi->imtu);
+
+ break;
+
+ default:
+ return -ECONNREFUSED;
+ }
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+
+ if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {
+ /* TODO assign fcs for br/edr based on socket config option */
+ /* FCS is not used with AMP because it is redundant - lower
+ * layers already include a checksum. */
+ if (pi->amp_id)
+ pi->local_conf.fcs = L2CAP_FCS_NONE;
+ else
+ pi->local_conf.fcs = L2CAP_FCS_CRC16;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->local_conf.fcs);
+ pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
+ }
+
+ req->dcid = cpu_to_le16(pi->dcid);
+ req->flags = cpu_to_le16(0);
+
+ return ptr - data;
+}
+
+static int l2cap_parse_conf_req(struct sock *sk, void *data)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- void *req = chan->conf_req;
- int len = chan->conf_len;
+ void *req = pi->conf_req;
+ int len = pi->conf_len;
int type, hint, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
- struct l2cap_conf_efs efs;
- u8 remote_efs = 0;
+ struct l2cap_conf_ext_fs fs;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
- u16 size;
- BT_DBG("chan %p", chan);
+ BT_DBG("sk %p", sk);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -2244,10 +3475,16 @@
break;
case L2CAP_CONF_FLUSH_TO:
- chan->flush_to = val;
+ pi->flush_to = val;
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
+ result = L2CAP_CONF_UNACCEPT;
+ else
+ pi->remote_conf.flush_to = val;
break;
case L2CAP_CONF_QOS:
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
+ result = L2CAP_CONF_UNACCEPT;
break;
case L2CAP_CONF_RFC:
@@ -2257,23 +3494,42 @@
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
- set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+ pi->remote_conf.fcs = val;
break;
- case L2CAP_CONF_EFS:
- remote_efs = 1;
- if (olen == sizeof(efs))
- memcpy(&efs, (void *) val, olen);
+ case L2CAP_CONF_EXT_FS:
+ if (olen == sizeof(fs)) {
+ pi->conf_state |= L2CAP_CONF_EFS_RECV;
+ if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
+ result = L2CAP_CONF_UNACCEPT;
+ break;
+ }
+ memcpy(&fs, (void *) val, olen);
+ if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
+ result = L2CAP_CONF_FLOW_SPEC_REJECT;
+ break;
+ }
+ pi->remote_conf.flush_to =
+ le32_to_cpu(fs.flush_to);
+ pi->remote_fs.id = fs.id;
+ pi->remote_fs.type = fs.type;
+ pi->remote_fs.max_sdu =
+ le16_to_cpu(fs.max_sdu);
+ pi->remote_fs.sdu_arr_time =
+ le32_to_cpu(fs.sdu_arr_time);
+ pi->remote_fs.acc_latency =
+ le32_to_cpu(fs.acc_latency);
+ pi->remote_fs.flush_to =
+ le32_to_cpu(fs.flush_to);
+ }
break;
- case L2CAP_CONF_EWS:
- if (!enable_hs)
- return -ECONNREFUSED;
-
- set_bit(FLAG_EXT_CTRL, &chan->flags);
- set_bit(CONF_EWS_RECV, &chan->conf_state);
- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
- chan->remote_tx_win = val;
+ case L2CAP_CONF_EXT_WINDOW:
+ pi->extended_control = 1;
+ pi->remote_tx_win = val;
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
+ pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
break;
default:
@@ -2286,132 +3542,83 @@
}
}
- if (chan->num_conf_rsp || chan->num_conf_req > 1)
+ if (pi->num_conf_rsp || pi->num_conf_req > 1)
goto done;
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
- if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
- chan->mode = l2cap_select_mode(rfc.mode,
- chan->conn->feat_mask);
+ if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
+ pi->mode = l2cap_select_mode(rfc.mode,
+ pi->conn->feat_mask);
break;
}
- if (remote_efs) {
- if (__l2cap_efs_supported(chan))
- set_bit(FLAG_EFS_ENABLE, &chan->flags);
- else
- return -ECONNREFUSED;
- }
-
- if (chan->mode != rfc.mode)
+ if (pi->mode != rfc.mode)
return -ECONNREFUSED;
break;
}
done:
- if (chan->mode != rfc.mode) {
+ if (pi->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
- rfc.mode = chan->mode;
+ rfc.mode = pi->mode;
- if (chan->num_conf_rsp == 1)
+ if (pi->num_conf_rsp == 1)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
}
+
+ if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
+ !(pi->conf_state & L2CAP_CONF_EFS_RECV))
+ return -ECONNREFUSED;
+
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
- if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ if (mtu < L2CAP_DEFAULT_MIN_MTU) {
result = L2CAP_CONF_UNACCEPT;
+ pi->omtu = L2CAP_DEFAULT_MIN_MTU;
+ }
else {
- chan->omtu = mtu;
- set_bit(CONF_MTU_DONE, &chan->conf_state);
+ pi->omtu = mtu;
+ pi->conf_state |= L2CAP_CONF_MTU_DONE;
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
-
- if (remote_efs) {
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != chan->local_stype) {
-
- result = L2CAP_CONF_UNACCEPT;
-
- if (chan->num_conf_req >= 1)
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- sizeof(efs),
- (unsigned long) &efs);
- } else {
- /* Send PENDING Conf Rsp */
- result = L2CAP_CONF_PENDING;
- set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
- }
- }
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
switch (rfc.mode) {
case L2CAP_MODE_BASIC:
- chan->fcs = L2CAP_FCS_NONE;
- set_bit(CONF_MODE_DONE, &chan->conf_state);
+ pi->fcs = L2CAP_FCS_NONE;
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
break;
case L2CAP_MODE_ERTM:
- if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
- chan->remote_tx_win = rfc.txwin_size;
- else
- rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
+ if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
+ pi->remote_tx_win = rfc.txwin_size;
+ pi->remote_max_tx = rfc.max_transmit;
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ l2cap_get_ertm_timeouts(&rfc, pi);
- chan->remote_max_tx = rfc.max_transmit;
-
- size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
- chan->conn->mtu -
- L2CAP_EXT_HDR_SIZE -
- L2CAP_SDULEN_SIZE -
- L2CAP_FCS_SIZE);
- rfc.max_pdu_size = cpu_to_le16(size);
- chan->remote_mps = size;
-
- rfc.retrans_timeout =
- le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout =
- le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
-
- set_bit(CONF_MODE_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
- chan->remote_id = efs.id;
- chan->remote_stype = efs.stype;
- chan->remote_msdu = le16_to_cpu(efs.msdu);
- chan->remote_flush_to =
- le32_to_cpu(efs.flush_to);
- chan->remote_acc_lat =
- le32_to_cpu(efs.acc_lat);
- chan->remote_sdu_itime =
- le32_to_cpu(efs.sdu_itime);
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- sizeof(efs), (unsigned long) &efs);
- }
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
+ sizeof(fs), (unsigned long) &fs);
+
break;
case L2CAP_MODE_STREAMING:
- size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
- chan->conn->mtu -
- L2CAP_EXT_HDR_SIZE -
- L2CAP_SDULEN_SIZE -
- L2CAP_FCS_SIZE);
- rfc.max_pdu_size = cpu_to_le16(size);
- chan->remote_mps = size;
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
- set_bit(CONF_MODE_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
@@ -2422,29 +3629,183 @@
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
- rfc.mode = chan->mode;
+ rfc.mode = pi->mode;
+ }
+
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
+ !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
+ pi->conf_state |= L2CAP_CONF_PEND_SENT;
+ result = L2CAP_CONF_PENDING;
+
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
+ pi->amp_id) {
+ struct hci_chan *chan;
+ /* Trigger logical link creation only on AMP */
+
+ chan = l2cap_chan_admit(pi->amp_id, sk);
+ if (!chan)
+ return -ECONNREFUSED;
+
+ if (chan->state == BT_CONNECTED)
+ l2cap_create_cfm(chan, 0);
+ }
}
if (result == L2CAP_CONF_SUCCESS)
- set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
}
- rsp->scid = cpu_to_le16(chan->dcid);
+ rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
return ptr - data;
}
-static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
+static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_rsp *rsp = data;
+ void *ptr = rsp->data;
+ void *req = pi->conf_req;
+ int len = pi->conf_len;
+ int type, hint, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_ext_fs fs;
+ u16 mtu = pi->omtu;
+ u16 tx_win = pi->remote_tx_win;
+ u16 result = L2CAP_CONF_SUCCESS;
+
+ BT_DBG("sk %p", sk);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+
+ hint = type & L2CAP_CONF_HINT;
+ type &= L2CAP_CONF_MASK;
+
+ switch (type) {
+ case L2CAP_CONF_MTU:
+ mtu = val;
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ if (pi->amp_move_id)
+ result = L2CAP_CONF_UNACCEPT;
+ else
+ pi->remote_conf.flush_to = val;
+ break;
+
+ case L2CAP_CONF_QOS:
+ if (pi->amp_move_id)
+ result = L2CAP_CONF_UNACCEPT;
+ break;
+
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *) val, olen);
+ break;
+
+ case L2CAP_CONF_FCS:
+ pi->remote_conf.fcs = val;
+ break;
+
+ case L2CAP_CONF_EXT_FS:
+ if (olen == sizeof(fs)) {
+ memcpy(&fs, (void *) val, olen);
+ if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
+ result = L2CAP_CONF_FLOW_SPEC_REJECT;
+ else {
+ pi->remote_conf.flush_to =
+ le32_to_cpu(fs.flush_to);
+ }
+ }
+ break;
+
+ case L2CAP_CONF_EXT_WINDOW:
+ tx_win = val;
+ break;
+
+ default:
+ if (hint)
+ break;
+
+ result = L2CAP_CONF_UNKNOWN;
+ *((u8 *) ptr++) = type;
+ break;
+ }
+ }
+
+ BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
+ result, pi->mode, rfc.mode);
+
+ if (pi->mode != rfc.mode || rfc.mode == L2CAP_MODE_BASIC)
+ result = L2CAP_CONF_UNACCEPT;
+
+ if (result == L2CAP_CONF_SUCCESS) {
+ /* Configure output options and let the other side know
+ * which ones we don't like. */
+
+ /* Don't allow mtu to decrease. */
+ if (mtu < pi->omtu)
+ result = L2CAP_CONF_UNACCEPT;
+
+ BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+
+ /* Don't allow extended transmit window to change. */
+ if (tx_win != pi->remote_tx_win) {
+ result = L2CAP_CONF_UNACCEPT;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
+ pi->remote_tx_win);
+ }
+
+ pi->remote_mps = rfc.max_pdu_size;
+
+ if (rfc.mode == L2CAP_MODE_ERTM) {
+ l2cap_get_ertm_timeouts(&rfc, pi);
+ } else {
+ rfc.retrans_timeout = 0;
+ rfc.monitor_timeout = 0;
+ }
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+ }
+
+ if (result != L2CAP_CONF_SUCCESS)
+ goto done;
+
+ pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs;
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG)
+ pi->flush_to = pi->remote_conf.flush_to;
+
+done:
+ rsp->scid = cpu_to_le16(pi->dcid);
+ rsp->result = cpu_to_le16(result);
+ rsp->flags = cpu_to_le16(0x0000);
+
+ return ptr - data;
+}
+
+static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
- struct l2cap_conf_efs efs;
+ struct l2cap_conf_rfc rfc;
- BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
+ BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
+
+ /* Initialize rfc in case no rfc option is received */
+ rfc.mode = pi->mode;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -2453,131 +3814,100 @@
case L2CAP_CONF_MTU:
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
- chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ pi->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
- chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ pi->imtu = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
break;
case L2CAP_CONF_FLUSH_TO:
- chan->flush_to = val;
+ pi->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, pi->flush_to);
break;
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
- if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
- rfc.mode != chan->mode)
+ if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+ rfc.mode != pi->mode)
return -ECONNREFUSED;
- chan->fcs = 0;
+ pi->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
break;
- case L2CAP_CONF_EWS:
- chan->tx_win = min_t(u16, val,
- L2CAP_DEFAULT_EXT_WINDOW);
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ case L2CAP_CONF_EXT_WINDOW:
+ pi->tx_win = val;
+
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
+ pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
+ 2, pi->tx_win);
break;
- case L2CAP_CONF_EFS:
- if (olen == sizeof(efs))
- memcpy(&efs, (void *)val, olen);
-
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != chan->local_stype)
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- sizeof(efs), (unsigned long) &efs);
+ default:
break;
}
}
- if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
+ if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
return -ECONNREFUSED;
- chan->mode = rfc.mode;
+ pi->mode = rfc.mode;
- if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
+ if (*result == L2CAP_CONF_SUCCESS) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
-
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
- chan->local_msdu = le16_to_cpu(efs.msdu);
- chan->local_sdu_itime =
- le32_to_cpu(efs.sdu_itime);
- chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
- chan->local_flush_to =
- le32_to_cpu(efs.flush_to);
- }
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
break;
-
case L2CAP_MODE_STREAMING:
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
- req->dcid = cpu_to_le16(chan->dcid);
+ req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0x0000);
return ptr - data;
}
-static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
+static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- BT_DBG("chan %p", chan);
+ BT_DBG("sk %p", sk);
- rsp->scid = cpu_to_le16(chan->dcid);
+ rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(flags);
return ptr - data;
}
-void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
{
- struct l2cap_conn_rsp rsp;
- struct l2cap_conn *conn = chan->conn;
- u8 buf[128];
-
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, chan->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
- return;
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
-}
-
-static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
-{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc;
- BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
+ BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
- if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
+ /* Initialize rfc in case no rfc option is received */
+ rfc.mode = pi->mode;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+
+ if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
return;
while (len >= L2CAP_CONF_OPT_SIZE) {
@@ -2591,38 +3921,145 @@
}
}
- /* Use sane default values in case a misbehaving remote device
- * did not send an RFC option.
- */
- rfc.mode = chan->mode;
- rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- rfc.max_pdu_size = cpu_to_le16(chan->imtu);
-
- BT_ERR("Expected RFC option was not found, using defaults");
-
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
break;
case L2CAP_MODE_STREAMING:
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
+static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_ext_fs fs;
+
+ BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if ((type == L2CAP_CONF_EXT_FS) &&
+ (olen == sizeof(struct l2cap_conf_ext_fs))) {
+ memcpy(&fs, (void *)val, olen);
+ pi->local_fs.id = fs.id;
+ pi->local_fs.type = fs.type;
+ pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
+ pi->local_fs.sdu_arr_time =
+ le32_to_cpu(fs.sdu_arr_time);
+ pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
+ pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
+ break;
+ }
+ }
+
+}
+
+static int l2cap_finish_amp_move(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ int err;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+
+ if (pi->ampcon)
+ pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
+ else
+ pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
+
+ err = l2cap_setup_resegment(sk);
+
+ return err;
+}
+
+static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
+ u16 result)
+{
+ int err = 0;
+ struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);
+
+ if (pi->reconf_state == L2CAP_RECONF_NONE)
+ return -ECONNREFUSED;
+
+ if (result == L2CAP_CONF_SUCCESS) {
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ int type, olen;
+ unsigned long val;
+
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ if (type == L2CAP_CONF_RFC) {
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+
+ if (rfc.mode != pi->mode) {
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ return -ECONNRESET;
+ }
+
+ goto done;
+ }
+ }
+ }
+
+ BT_ERR("Expected RFC option was missing, using existing values");
+
+ rfc.mode = pi->mode;
+ rfc.retrans_timeout = cpu_to_le16(pi->retrans_timeout);
+ rfc.monitor_timeout = cpu_to_le16(pi->monitor_timeout);
+
+done:
+ l2cap_ertm_stop_ack_timer(pi);
+ l2cap_ertm_stop_retrans_timer(pi);
+ l2cap_ertm_stop_monitor_timer(pi);
+
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (pi->mode == L2CAP_MODE_ERTM) {
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ }
+
+ if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
+ l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;
+
+ /* Respond to poll */
+ err = l2cap_answer_move_poll(sk);
+ } else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {
+ if (pi->mode == L2CAP_MODE_ERTM) {
+ l2cap_ertm_tx(sk, NULL, NULL,
+ L2CAP_ERTM_EVENT_EXPLICIT_POLL);
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
+ }
+ }
+
+ return err;
+}
+
+
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
- struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
+ struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
- if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
+ if (rej->reason != 0x0000)
return 0;
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
- cancel_delayed_work(&conn->info_timer);
+ del_timer(&conn->info_timer);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -2633,11 +4070,14 @@
return 0;
}
-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u8 *data, u8 rsp_code,
+ u8 amp_id)
{
+ struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
- struct l2cap_chan *chan = NULL, *pchan;
struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
@@ -2647,21 +4087,18 @@
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
- if (!pchan) {
+ parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
+ if (!parent) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
}
- parent = pchan->sk;
-
- mutex_lock(&conn->chan_lock);
- lock_sock(parent);
+ bh_lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
- conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
+ conn->disc_reason = 0x05;
result = L2CAP_CR_SEC_BLOCK;
goto response;
}
@@ -2674,92 +4111,112 @@
goto response;
}
- chan = pchan->ops->new_connection(pchan->data);
- if (!chan)
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+ if (!sk)
goto response;
- sk = chan->sk;
+ write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(conn, scid)) {
+ if (__l2cap_get_chan_by_dcid(list, scid)) {
+ write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
- chan->ops->close(chan->data);
+ l2cap_sock_kill(sk);
+ sk = NULL;
goto response;
}
hci_conn_hold(conn->hcon);
+ l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
- chan->psm = psm;
- chan->dcid = scid;
+ l2cap_pi(sk)->psm = psm;
+ l2cap_pi(sk)->dcid = scid;
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ __l2cap_chan_add(conn, sk);
+ dcid = l2cap_pi(sk)->scid;
+ l2cap_pi(sk)->amp_id = amp_id;
- dcid = chan->scid;
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- __set_chan_timer(chan, sk->sk_sndtimeo);
-
- chan->ident = cmd->ident;
+ l2cap_pi(sk)->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
- if (l2cap_chan_check_security(chan)) {
+ if (l2cap_check_security(sk)) {
if (bt_sk(sk)->defer_setup) {
- __l2cap_state_change(chan, BT_CONNECT2);
+ sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
} else {
- __l2cap_state_change(chan, BT_CONFIG);
- result = L2CAP_CR_SUCCESS;
+ /* Force pending result for AMP controllers.
+ * The connection will succeed after the
+ * physical link is up. */
+ if (amp_id) {
+ sk->sk_state = BT_CONNECT2;
+ result = L2CAP_CR_PEND;
+ } else {
+ sk->sk_state = BT_CONFIG;
+ result = L2CAP_CR_SUCCESS;
+ }
status = L2CAP_CS_NO_INFO;
}
} else {
- __l2cap_state_change(chan, BT_CONNECT2);
+ sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
}
} else {
- __l2cap_state_change(chan, BT_CONNECT2);
+ sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
}
+ write_unlock_bh(&list->lock);
+
response:
- release_sock(parent);
- mutex_unlock(&conn->chan_lock);
+ bh_unlock_sock(parent);
sendresp:
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
- if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
+ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
+ mod_timer(&conn->info_timer, jiffies +
+ msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(info), &info);
}
- if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
result == L2CAP_CR_SUCCESS) {
u8 buf[128];
- set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
+ return sk;
+}
+
+static inline int l2cap_connect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
return 0;
}
@@ -2767,107 +4224,115 @@
{
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
- struct l2cap_chan *chan;
+ struct sock *sk;
u8 req[128];
- int err;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
- BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
- dcid, scid, result, status);
-
- mutex_lock(&conn->chan_lock);
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
if (scid) {
- chan = __l2cap_get_chan_by_scid(conn, scid);
- if (!chan) {
- err = -EFAULT;
- goto unlock;
- }
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+ if (!sk)
+ return -EFAULT;
} else {
- chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
- if (!chan) {
- err = -EFAULT;
- goto unlock;
- }
+ sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+ if (!sk)
+ return -EFAULT;
}
- err = 0;
-
- l2cap_chan_lock(chan);
-
switch (result) {
case L2CAP_CR_SUCCESS:
- l2cap_state_change(chan, BT_CONFIG);
- chan->ident = 0;
- chan->dcid = dcid;
- clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ sk->sk_state = BT_CONFIG;
+ l2cap_pi(sk)->ident = 0;
+ l2cap_pi(sk)->dcid = dcid;
+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
- if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
break;
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, req), req);
+ l2cap_pi(sk)->num_conf_req++;
break;
case L2CAP_CR_PEND:
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
break;
default:
- l2cap_chan_del(chan, ECONNREFUSED);
+ /* don't delete l2cap channel if sk is owned by user */
+ if (sock_owned_by_user(sk)) {
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
+ break;
+ }
+
+ l2cap_chan_del(sk, ECONNREFUSED);
break;
}
- l2cap_chan_unlock(chan);
-
-unlock:
- mutex_unlock(&conn->chan_lock);
-
- return err;
+ bh_unlock_sock(sk);
+ return 0;
}
-static inline void set_default_fcs(struct l2cap_chan *chan)
+static inline void set_default_fcs(struct l2cap_pinfo *pi)
{
/* FCS is enabled only in ERTM or streaming mode, if one or both
* sides request it.
*/
- if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
- chan->fcs = L2CAP_FCS_NONE;
- else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
- chan->fcs = L2CAP_FCS_CRC16;
+ if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+ pi->fcs = L2CAP_FCS_NONE;
+ else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+ pi->fcs = L2CAP_FCS_CRC16;
}
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
u16 dcid, flags;
- u8 rsp[64];
- struct l2cap_chan *chan;
+ u8 rspbuf[64];
+ struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
+ struct sock *sk;
int len;
+ u8 amp_move_reconf = 0;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
- chan = l2cap_get_chan_by_scid(conn, dcid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+ if (!sk)
return -ENOENT;
- l2cap_chan_lock(chan);
+ BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
+ "reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
+ sk->sk_state, l2cap_pi(sk)->rx_state,
+ l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
+ l2cap_pi(sk)->amp_move_id);
- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
- struct l2cap_cmd_rej_cid rej;
+ /* Detect a reconfig request due to channel move between
+ * BR/EDR and AMP
+ */
+ if (sk->sk_state == BT_CONNECTED &&
+ l2cap_pi(sk)->rx_state ==
+ L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
+ l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;
- rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
- rej.scid = cpu_to_le16(chan->scid);
- rej.dcid = cpu_to_le16(chan->dcid);
+ if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
+ amp_move_reconf = 1;
+ if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
+ struct l2cap_cmd_rej rej;
+
+ rej.reason = cpu_to_le16(0x0002);
l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
sizeof(rej), &rej);
goto unlock;
@@ -2875,80 +4340,84 @@
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
- if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
+ if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_REJECT, flags), rsp);
+ l2cap_build_conf_rsp(sk, rspbuf,
+ L2CAP_CONF_REJECT, flags), rspbuf);
goto unlock;
}
/* Store config. */
- memcpy(chan->conf_req + chan->conf_len, req->data, len);
- chan->conf_len += len;
+ memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
+ l2cap_pi(sk)->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0001), rsp);
+ l2cap_build_conf_rsp(sk, rspbuf,
+ L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
goto unlock;
}
/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ if (!amp_move_reconf)
+ len = l2cap_parse_conf_req(sk, rspbuf);
+ else
+ len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);
+
if (len < 0) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto unlock;
}
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
- chan->num_conf_rsp++;
+ l2cap_pi(sk)->conf_ident = cmd->ident;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
+ rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
+ !l2cap_pi(sk)->amp_id) {
+ /* Send success response right after pending if using
+ * lockstep config on BR/EDR
+ */
+ rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
+ }
/* Reset config buffer. */
- chan->conf_len = 0;
+ l2cap_pi(sk)->conf_len = 0;
- if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
+ if (amp_move_reconf)
goto unlock;
- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
- set_default_fcs(chan);
+ l2cap_pi(sk)->num_conf_rsp++;
- l2cap_state_change(chan, BT_CONNECTED);
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
+ goto unlock;
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
+ set_default_fcs(l2cap_pi(sk));
- l2cap_chan_ready(chan);
+ sk->sk_state = BT_CONNECTED;
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
+ l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
+
+ l2cap_chan_ready(sk);
goto unlock;
}
- if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
u8 buf[64];
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
- }
-
- /* Got Conf Rsp PENDING from remote side and asume we sent
- Conf Rsp PENDING in the code above */
- if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
- test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
-
- /* check compatibility */
-
- clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
- set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0000), rsp);
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
unlock:
- l2cap_chan_unlock(chan);
+ bh_unlock_sock(sk);
return 0;
}
@@ -2956,7 +4425,8 @@
{
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
- struct l2cap_chan *chan;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
@@ -2966,96 +4436,114 @@
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
scid, flags, result);
- chan = l2cap_get_chan_by_scid(conn, scid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+ if (!sk)
return 0;
- l2cap_chan_lock(chan);
+ pi = l2cap_pi(sk);
+
+ if (pi->reconf_state != L2CAP_RECONF_NONE) {
+ l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
+ goto done;
+ }
switch (result) {
case L2CAP_CONF_SUCCESS:
- l2cap_conf_rfc_get(chan, rsp->data, len);
- clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
+ !(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
+ /* Lockstep procedure requires a pending response
+ * before success.
+ */
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ goto done;
+ }
+
+ l2cap_conf_rfc_get(sk, rsp->data, len);
break;
case L2CAP_CONF_PENDING:
- set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+ if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ goto done;
+ }
- if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
- char buf[64];
+ l2cap_conf_rfc_get(sk, rsp->data, len);
- len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- buf, &result);
- if (len < 0) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;
+
+ l2cap_conf_ext_fs_get(sk, rsp->data, len);
+
+ if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
+ struct hci_chan *chan;
+
+ /* Already sent a 'pending' response, so set up
+ * the logical link now
+ */
+ chan = l2cap_chan_admit(pi->amp_id, sk);
+ if (!chan) {
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
goto done;
}
- /* check compatibility */
-
- clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
- set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(chan, buf,
- L2CAP_CONF_SUCCESS, 0x0000), buf);
+ if (chan->state == BT_CONNECTED)
+ l2cap_create_cfm(chan, 0);
}
+
goto done;
case L2CAP_CONF_UNACCEPT:
- if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+ if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64];
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto done;
}
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
- len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ len = l2cap_parse_conf_rsp(sk, rsp->data,
+ len, req, &result);
if (len < 0) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto done;
}
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ, len, req);
- chan->num_conf_req++;
+ pi->num_conf_req++;
if (result != L2CAP_CONF_SUCCESS)
goto done;
break;
}
default:
- l2cap_chan_set_err(chan, ECONNRESET);
-
- __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ sk->sk_err = ECONNRESET;
+ l2cap_sock_set_timer(sk, HZ * 5);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto done;
}
if (flags & 0x01)
goto done;
- set_bit(CONF_INPUT_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_INPUT_DONE;
- if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
- set_default_fcs(chan);
+ if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+ set_default_fcs(pi);
- l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ sk->sk_state = BT_CONNECTED;
- l2cap_chan_ready(chan);
+ if (pi->mode == L2CAP_MODE_ERTM ||
+ pi->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
+
+ l2cap_chan_ready(sk);
}
done:
- l2cap_chan_unlock(chan);
+ bh_unlock_sock(sk);
return 0;
}
@@ -3064,7 +4552,6 @@
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
u16 dcid, scid;
- struct l2cap_chan *chan;
struct sock *sk;
scid = __le16_to_cpu(req->scid);
@@ -3072,34 +4559,44 @@
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
- mutex_lock(&conn->chan_lock);
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+ if (!sk)
+ return 0;
- chan = __l2cap_get_chan_by_scid(conn, dcid);
- if (!chan) {
- mutex_unlock(&conn->chan_lock);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
+
+ /* Only do cleanup if a disconnect request was not sent already */
+ if (sk->sk_state != BT_DISCONN) {
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ sk->sk_send_head = NULL;
+ skb_queue_purge(TX_QUEUE(sk));
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+ skb_queue_purge(SREJ_QUEUE(sk));
+
+ __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
+ }
+ }
+
+ /* don't delete l2cap channel if sk is owned by user */
+ if (sock_owned_by_user(sk)) {
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
return 0;
}
- l2cap_chan_lock(chan);
+ l2cap_chan_del(sk, ECONNRESET);
- sk = chan->sk;
+ bh_unlock_sock(sk);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.scid = cpu_to_le16(chan->dcid);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
-
- lock_sock(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
- release_sock(sk);
-
- l2cap_chan_del(chan, ECONNRESET);
-
- l2cap_chan_unlock(chan);
-
- chan->ops->close(chan->data);
-
- mutex_unlock(&conn->chan_lock);
-
+ l2cap_sock_kill(sk);
return 0;
}
@@ -3107,31 +4604,30 @@
{
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
u16 dcid, scid;
- struct l2cap_chan *chan;
+ struct sock *sk;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
- mutex_lock(&conn->chan_lock);
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+ if (!sk)
+ return 0;
- chan = __l2cap_get_chan_by_scid(conn, scid);
- if (!chan) {
- mutex_unlock(&conn->chan_lock);
+ /* don't delete l2cap channel if sk is owned by user */
+ if (sock_owned_by_user(sk)) {
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
return 0;
}
- l2cap_chan_lock(chan);
+ l2cap_chan_del(sk, 0);
+ bh_unlock_sock(sk);
- l2cap_chan_del(chan, 0);
-
- l2cap_chan_unlock(chan);
-
- chan->ops->close(chan->data);
-
- mutex_unlock(&conn->chan_lock);
-
+ l2cap_sock_kill(sk);
return 0;
}
@@ -3152,26 +4648,16 @@
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
- | L2CAP_FEAT_FCS;
- if (enable_hs)
- feat_mask |= L2CAP_FEAT_EXT_FLOW
- | L2CAP_FEAT_EXT_WINDOW;
-
+ | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
u8 buf[12];
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
-
- if (enable_hs)
- l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
- else
- l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
-
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
+ memcpy(buf + 4, l2cap_fixed_chan, 8);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else {
@@ -3200,7 +4686,7 @@
conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
return 0;
- cancel_delayed_work(&conn->info_timer);
+ del_timer(&conn->info_timer);
if (result != L2CAP_IR_SUCCESS) {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -3211,8 +4697,7 @@
return 0;
}
- switch (type) {
- case L2CAP_IT_FEAT_MASK:
+ if (type == L2CAP_IT_FEAT_MASK) {
conn->feat_mask = get_unaligned_le32(rsp->data);
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
@@ -3229,65 +4714,42 @@
l2cap_conn_start(conn);
}
- break;
-
- case L2CAP_IT_FIXED_CHAN:
- conn->fixed_chan_mask = rsp->data[0];
+ } else if (type == L2CAP_IT_FIXED_CHAN) {
+ conn->fc_mask = rsp->data[0];
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
- break;
}
return 0;
}
-static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len,
- void *data)
+static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
{
- struct l2cap_create_chan_req *req = data;
- struct l2cap_create_chan_rsp rsp;
- u16 psm, scid;
+ struct l2cap_move_chan_req req;
+ u8 ident;
- if (cmd_len != sizeof(*req))
- return -EPROTO;
+ BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
+ (int) dest_amp_id);
- if (!enable_hs)
- return -EINVAL;
+ ident = l2cap_get_ident(conn);
+ if (pi)
+ pi->ident = ident;
- psm = le16_to_cpu(req->psm);
- scid = le16_to_cpu(req->scid);
+ req.icid = cpu_to_le16(icid);
+ req.dest_amp_id = dest_amp_id;
- BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
-
- /* Placeholder: Always reject */
- rsp.dcid = 0;
- rsp.scid = cpu_to_le16(scid);
- rsp.result = L2CAP_CR_NO_MEM;
- rsp.status = L2CAP_CS_NO_INFO;
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- return 0;
-}
-
-static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, void *data)
-{
- BT_DBG("conn %p", conn);
-
- return l2cap_connect_rsp(conn, cmd, data);
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
}
static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid, u16 result)
+ u16 icid, u16 result)
{
struct l2cap_move_chan_rsp rsp;
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
rsp.icid = cpu_to_le16(icid);
rsp.result = cpu_to_le16(result);
@@ -3296,16 +4758,16 @@
}
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
- struct l2cap_chan *chan, u16 icid, u16 result)
+ struct l2cap_pinfo *pi, u16 icid, u16 result)
{
struct l2cap_move_chan_cfm cfm;
u8 ident;
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
ident = l2cap_get_ident(conn);
- if (chan)
- chan->ident = ident;
+ if (pi)
+ pi->ident = ident;
cfm.icid = cpu_to_le16(icid);
cfm.result = cpu_to_le16(result);
@@ -3314,90 +4776,879 @@
}
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
+ u16 icid)
{
struct l2cap_move_chan_cfm_rsp rsp;
- BT_DBG("icid %d", icid);
+ BT_DBG("icid %d", (int) icid);
rsp.icid = cpu_to_le16(icid);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
-static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
{
- struct l2cap_move_chan_req *req = data;
- u16 icid = 0;
- u16 result = L2CAP_MR_NOT_ALLOWED;
+ struct l2cap_create_chan_req *req =
+ (struct l2cap_create_chan_req *) data;
+ struct sock *sk;
+ u16 psm, scid;
- if (cmd_len != sizeof(*req))
- return -EPROTO;
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
+ (int) req->amp_id);
+
+ if (req->amp_id) {
+ struct hci_dev *hdev;
+
+ /* Validate AMP controller id */
+ hdev = hci_dev_get(req->amp_id);
+ if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
+ struct l2cap_create_chan_rsp rsp;
+
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
+ rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+ return 0;
+ }
+
+ hci_dev_put(hdev);
+ }
+
+ sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+ req->amp_id);
+
+ if (sk)
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
+
+ if (sk && req->amp_id &&
+ (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
+ amp_accept_physical(conn, req->amp_id, sk);
+
+ return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ BT_DBG("conn %p", conn);
+
+ return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
+ u16 icid = 0;
+ u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
icid = le16_to_cpu(req->icid);
- BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+ BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);
- if (!enable_hs)
- return -EINVAL;
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
- /* Placeholder: Always refuse */
+ if (!sk)
+ goto send_move_response;
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ if (pi->scid < L2CAP_CID_DYN_START ||
+ (pi->mode != L2CAP_MODE_ERTM &&
+ pi->mode != L2CAP_MODE_STREAMING)) {
+ goto send_move_response;
+ }
+
+ if (pi->amp_id == req->dest_amp_id) {
+ result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
+ goto send_move_response;
+ }
+
+ if (req->dest_amp_id) {
+ struct hci_dev *hdev;
+ hdev = hci_dev_get(req->dest_amp_id);
+ if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
+ if (hdev)
+ hci_dev_put(hdev);
+
+ result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
+ goto send_move_response;
+ }
+ hci_dev_put(hdev);
+ }
+
+ if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
+ pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
+ bacmp(conn->src, conn->dst) > 0) {
+ result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
+ goto send_move_response;
+ }
+
+ if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
+ result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
+ goto send_move_response;
+ }
+
+ pi->amp_move_cmd_ident = cmd->ident;
+ pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
+ l2cap_amp_move_setup(sk);
+ pi->amp_move_id = req->dest_amp_id;
+ icid = pi->dcid;
+
+ if (req->dest_amp_id == 0) {
+ /* Moving to BR/EDR */
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
+ result = L2CAP_MOVE_CHAN_PENDING;
+ } else {
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ result = L2CAP_MOVE_CHAN_SUCCESS;
+ }
+ } else {
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
+ amp_accept_physical(pi->conn, req->dest_amp_id, sk);
+ result = L2CAP_MOVE_CHAN_PENDING;
+ }
+
+send_move_response:
l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+ if (sk)
+ release_sock(sk);
+
return 0;
}
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd, u8 *data)
{
- struct l2cap_move_chan_rsp *rsp = data;
+ struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
 u16 icid, result;
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
 icid = le16_to_cpu(rsp->icid);
 result = le16_to_cpu(rsp->result);
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
+ /* Handle an incoming L2CAP Move Channel Response. On success/pending,
+ * advance the local AMP move state machine (possibly admitting a
+ * logical link on the target controller); on any failure result,
+ * revert the move and send an "unconfirmed" Move Channel Confirm.
+ * NOTE(review): every switch path assigns sk before the trailing
+ * "if (sk) release_sock(sk)" — keep it that way if modifying.
+ */
+ switch (result) {
+ case L2CAP_MOVE_CHAN_SUCCESS:
+ case L2CAP_MOVE_CHAN_PENDING:
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk) {
+ l2cap_send_move_chan_cfm(conn, NULL, icid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ break;
+ }
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ l2cap_sock_clear_timer(sk);
+ if (result == L2CAP_MOVE_CHAN_PENDING)
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);
+
+ if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
+ /* Move confirm will be sent when logical link
+ * is complete.
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
+ if (result == L2CAP_MOVE_CHAN_PENDING) {
+ break;
+ } else if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
+ } else {
+ /* Logical link is up or moving to BR/EDR,
+ * proceed with move */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_CONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
+ struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
+ struct hci_chan *chan;
+ /* Moving to AMP */
+ if (result == L2CAP_MOVE_CHAN_SUCCESS) {
+ /* Remote is ready, send confirm immediately
+ * after logical link is ready
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
+ } else {
+ /* Both logical link and move success
+ * are required to confirm
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
+ }
+ pi->remote_fs = default_fs;
+ pi->local_fs = default_fs;
+ chan = l2cap_chan_admit(pi->amp_move_id, sk);
+ if (!chan) {
+ /* Logical link not available */
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ break;
+ }
+
+ if (chan->state == BT_CONNECTED) {
+ /* Logical link is already ready to go */
+ pi->ampcon = chan->conn;
+ pi->ampcon->l2cap_data = pi->conn;
+ if (result == L2CAP_MOVE_CHAN_SUCCESS) {
+ /* Can confirm now */
+ l2cap_send_move_chan_cfm(conn, pi,
+ pi->scid,
+ L2CAP_MOVE_CHAN_CONFIRMED);
+ } else {
+ /* Now only need move success
+ * required to confirm
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
+ }
+
+ l2cap_create_cfm(chan, 0);
+ }
+ } else {
+ /* Any other amp move state means the move failed. */
+ pi->amp_move_id = pi->amp_id;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_amp_move_revert(sk);
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+ break;
+ default:
+ /* Failed (including collision case) */
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk) {
+ /* Could not locate channel, icid is best guess */
+ l2cap_send_move_chan_cfm(conn, NULL, icid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ break;
+ }
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ l2cap_sock_clear_timer(sk);
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
+ pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
+ else {
+ /* Cleanup - cancel move */
+ pi->amp_move_id = pi->amp_id;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_amp_move_revert(sk);
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ }
+ }
+
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ break;
+ }
+
+ if (sk)
+ release_sock(sk);
 return 0;
}
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd, u8 *data)
{
- struct l2cap_move_chan_cfm *cfm = data;
+ struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
 u16 icid, result;
- if (cmd_len != sizeof(*cfm))
- return -EPROTO;
-
 icid = le16_to_cpu(cfm->icid);
 result = le16_to_cpu(cfm->result);
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
+ /* Handle an incoming L2CAP Move Channel Confirm: commit the move
+ * (switching amp_id, dropping any now-unused AMP hci_chan) on
+ * "confirmed", or revert it otherwise; always answer with a
+ * Move Channel Confirm Response, even for an unknown icid.
+ */
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk) {
+ BT_DBG("Bad channel (%d)", (int) icid);
+ goto send_move_confirm_response;
+ }
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
+ pi->amp_id = pi->amp_move_id;
+ if (!pi->amp_id && pi->ampchan) {
+ struct hci_chan *ampchan = pi->ampchan;
+ struct hci_conn *ampcon = pi->ampcon;
+ /* Have moved off of AMP, free the channel */
+ pi->ampchan = NULL;
+ pi->ampcon = NULL;
+ if (hci_chan_put(ampchan))
+ ampcon->l2cap_data = NULL;
+ else
+ l2cap_deaggregate(ampchan, pi);
+ }
+ l2cap_amp_move_success(sk);
+ } else {
+ pi->amp_move_id = pi->amp_id;
+ l2cap_amp_move_revert(sk);
+ }
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
+ /* NOTE(review): this branch only logs; confirm arriving while
+ * waiting for the logical link is treated as unexpected.
+ */
+ BT_DBG("Bad AMP_MOVE_STATE (%d)", pi->amp_move_state);
+ }
+
+send_move_confirm_response:
 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+ if (sk)
+ release_sock(sk);
+
 return 0;
}
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd, u8 *data)
{
- struct l2cap_move_chan_cfm_rsp *rsp = data;
- u16 icid;
+ struct l2cap_move_chan_cfm_rsp *rsp =
+ (struct l2cap_move_chan_cfm_rsp *) data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
+ u16 icid;
 icid = le16_to_cpu(rsp->icid);
- BT_DBG("icid %d", icid);
+ BT_DBG("icid %d", (int) icid);
+ /* Handle the final Move Channel Confirm Response: the move is
+ * complete, so commit amp_id, release any AMP hci_chan we have
+ * moved off of, and resume normal channel operation.
+ * Silently ignored when icid matches no local channel.
+ */
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ l2cap_sock_clear_timer(sk);
+
+ if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ pi->amp_id = pi->amp_move_id;
+
+ if (!pi->amp_id && pi->ampchan) {
+ struct hci_chan *ampchan = pi->ampchan;
+ struct hci_conn *ampcon = pi->ampcon;
+ /* Have moved off of AMP, free the channel */
+ pi->ampchan = NULL;
+ pi->ampcon = NULL;
+ if (hci_chan_put(ampchan))
+ ampcon->l2cap_data = NULL;
+ else
+ l2cap_deaggregate(ampchan, pi);
+ }
+
+ l2cap_amp_move_success(sk);
+
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ }
+
+ release_sock(sk);
+
+ return 0;
+}
+
+/* Workqueue handler that dispatches deferred AMP move signaling commands
+ * (queued by l2cap_sig_amp) to the appropriate handler, rejecting unknown
+ * or failed commands with L2CAP_COMMAND_REJ. Frees the cloned skb and the
+ * work item when done.
+ */
+static void l2cap_amp_signal_worker(struct work_struct *work)
+{
+ int err = 0;
+ struct l2cap_amp_signal_work *ampwork =
+ container_of(work, struct l2cap_amp_signal_work, work);
+
+ /* FIX: the command header lives in ampwork, not in the bare
+ * work_struct; "&work->cmd" did not compile (work_struct has no
+ * cmd member).
+ */
+ switch (ampwork->cmd.code) {
+ case L2CAP_MOVE_CHAN_REQ:
+ err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
+ ampwork->data);
+ break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+ err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
+ ampwork->data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+ err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
+ ampwork->data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_move_channel_confirm_rsp(ampwork->conn,
+ &ampwork->cmd, ampwork->data);
+ break;
+
+ default:
+ BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err) {
+ struct l2cap_cmd_rej rej;
+ BT_DBG("error %d", err);
+
+ /* In this context, commands are only rejected with
+ * "command not understood", code 0.
+ */
+ rej.reason = cpu_to_le16(0);
+ l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
+ L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+ }
+
+ kfree_skb(ampwork->skb);
+ kfree(ampwork);
+}
+
+/* Called when an AMP physical link create/accept attempt finishes.
+ * Depending on channel state and move role this either answers an
+ * incoming Create Channel, falls back to a BR/EDR connect, continues
+ * an initiator-side move, admits a logical link as responder, or
+ * aborts the move and restarts data transmission.
+ * Runs with the socket unlocked on entry; takes and releases sk lock.
+ */
+void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
+ struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
+ (int) local_id, (int) remote_id, sk);
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
+ release_sock(sk);
+ return;
+ }
+
+ pi = l2cap_pi(sk);
+
+ if (sk->sk_state != BT_CONNECTED) {
+ if (bt_sk(sk)->parent) {
+ struct l2cap_conn_rsp rsp;
+ char buf[128];
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+
+ /* Incoming channel on AMP */
+ if (result == L2CAP_CREATE_CHAN_SUCCESS) {
+ /* Send successful response */
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ } else {
+ /* Send negative response */
+ rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ }
+
+ l2cap_send_cmd(pi->conn, pi->ident,
+ L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (result == L2CAP_CREATE_CHAN_SUCCESS) {
+ sk->sk_state = BT_CONFIG;
+ pi->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(pi->conn,
+ l2cap_get_ident(pi->conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
+ }
+ } else {
+ /* Outgoing channel on AMP */
+ if (result != L2CAP_CREATE_CHAN_SUCCESS) {
+ /* Revert to BR/EDR connect */
+ l2cap_send_conn_req(sk);
+ } else {
+ pi->amp_id = local_id;
+ l2cap_send_create_chan_req(sk, remote_id);
+ }
+ }
+ } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
+ pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ l2cap_amp_move_setup(sk);
+ pi->amp_move_id = local_id;
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;
+
+ l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
+ pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ struct hci_chan *chan;
+ struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
+ pi->remote_fs = default_fs;
+ pi->local_fs = default_fs;
+ chan = l2cap_chan_admit(local_id, sk);
+ if (chan) {
+ if (chan->state == BT_CONNECTED) {
+ /* Logical link is ready to go */
+ pi->ampcon = chan->conn;
+ pi->ampcon->l2cap_data = pi->conn;
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+
+ l2cap_create_cfm(chan, 0);
+ } else {
+ /* Wait for logical link to be ready */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
+ }
+ } else {
+ /* Logical link not available */
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
+ }
+ } else {
+ BT_DBG("result %d, role %d, local_busy %d", result,
+ (int) pi->amp_move_role,
+ (int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ if (result == -EINVAL)
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
+ else
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
+ }
+
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+
+ if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ l2cap_rmem_available(sk))
+ l2cap_ertm_tx(sk, 0, 0,
+ L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
+
+ /* Restart data transmission */
+ l2cap_ertm_send(sk);
+ }
+
+ release_sock(sk);
+}
+
+/* Completion handler for an AMP logical link create attempt.
+ * On success, either finishes channel configuration (new channel on AMP)
+ * or advances the move state machine (sending Move Confirm/Response as
+ * needed). On failure, reverts or aborts the move and releases any AMP
+ * hci_chan held by the channel.
+ * NOTE(review): chan is dereferenced (chan->conn, chan->l2cap_sk) before
+ * the "(chan != NULL)" test below — that check is effectively dead;
+ * callers must pass a valid chan.
+ */
+static void l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
+{
+ struct l2cap_pinfo *pi;
+ struct sock *sk;
+ struct hci_chan *ampchan;
+ struct hci_conn *ampcon;
+
+ BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);
+
+ sk = chan->l2cap_sk;
+ chan->l2cap_sk = NULL;
+
+ BT_DBG("sk %p", sk);
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
+ release_sock(sk);
+ return;
+ }
+
+ pi = l2cap_pi(sk);
+
+ if ((!status) && (chan != NULL)) {
+ pi->ampcon = chan->conn;
+ pi->ampcon->l2cap_data = pi->conn;
+
+ BT_DBG("amp_move_state %d", pi->amp_move_state);
+
+ if (sk->sk_state != BT_CONNECTED) {
+ struct l2cap_conf_rsp rsp;
+
+ /* Must use spinlock to prevent concurrent
+ * execution of l2cap_config_rsp()
+ */
+ bh_lock_sock(sk);
+ l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(sk, &rsp,
+ L2CAP_CONF_SUCCESS, 0), &rsp);
+ pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
+ set_default_fcs(l2cap_pi(sk));
+
+ sk->sk_state = BT_CONNECTED;
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
+ l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
+
+ l2cap_chan_ready(sk);
+ }
+ bh_unlock_sock(sk);
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
+ /* Move confirm will be sent after a success
+ * response is received
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
+ else if (pi->amp_move_role ==
+ L2CAP_AMP_MOVE_INITIATOR) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ } else if (pi->amp_move_role ==
+ L2CAP_AMP_MOVE_RESPONDER) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ }
+ } else if ((pi->amp_move_state !=
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) &&
+ (pi->amp_move_state !=
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) &&
+ (pi->amp_move_state !=
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP)) {
+ /* Move was not in expected state, free the channel */
+ ampchan = pi->ampchan;
+ ampcon = pi->ampcon;
+ pi->ampchan = NULL;
+ pi->ampcon = NULL;
+ if (ampchan) {
+ if (hci_chan_put(ampchan))
+ ampcon->l2cap_data = NULL;
+ else
+ l2cap_deaggregate(ampchan, pi);
+ }
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ }
+ } else {
+ /* Logical link setup failed. */
+
+ if (sk->sk_state != BT_CONNECTED)
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ l2cap_amp_move_revert(sk);
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_CONFIG);
+ } else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ if ((pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
+ (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
+ /* Remote has only sent pending or
+ * success responses, clean up
+ */
+ l2cap_amp_move_revert(sk);
+ l2cap_pi(sk)->amp_move_role =
+ L2CAP_AMP_MOVE_NONE;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ }
+
+ /* Other amp move states imply that the move
+ * has already aborted
+ */
+ l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+ ampchan = pi->ampchan;
+ ampcon = pi->ampcon;
+ pi->ampchan = NULL;
+ pi->ampcon = NULL;
+ if (ampchan) {
+ if (hci_chan_put(ampchan))
+ ampcon->l2cap_data = NULL;
+ else
+ l2cap_deaggregate(ampchan, pi);
+ }
+ }
+
+ release_sock(sk);
+}
+
+/* Workqueue wrapper for l2cap_logical_link_complete(). Runs in process
+ * context; drops the socket reference taken when the work was queued
+ * (presumably by l2cap_create_cfm — confirm at queueing site) and the
+ * hci_chan reference held for the work item.
+ */
+static void l2cap_logical_link_worker(struct work_struct *work)
+{
+ struct l2cap_logical_link_work *log_link_work =
+ container_of(work, struct l2cap_logical_link_work, work);
+ struct sock *sk = log_link_work->chan->l2cap_sk;
+
+ if (sk) {
+ l2cap_logical_link_complete(log_link_work->chan,
+ log_link_work->status);
+ sock_put(sk);
+ }
+ hci_chan_put(log_link_work->chan);
+ kfree(log_link_work);
+}
+
+/* Queue deferred handling of a logical link create confirmation.
+ * Takes a hci_chan reference for the queued work; on any failure path
+ * the reference and the socket reference held via chan->l2cap_sk are
+ * dropped. Returns 0 on success, -EFAULT/-ENOMEM on error.
+ */
+static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
+{
+ struct l2cap_logical_link_work *amp_work;
+
+ if (!chan->l2cap_sk) {
+ BT_ERR("Expected l2cap_sk to point to connecting socket");
+ return -EFAULT;
+ }
+
+ amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
+ if (!amp_work) {
+ sock_put(chan->l2cap_sk);
+ return -ENOMEM;
+ }
+
+ /* FIX: "_work" was an undeclared identifier; the work item is
+ * amp_work.
+ */
+ INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
+ amp_work->chan = chan;
+ amp_work->status = status;
+
+ hci_chan_hold(chan);
+
+ if (!queue_work(_l2cap_wq, &amp_work->work)) {
+ kfree(amp_work);
+ sock_put(chan->l2cap_sk);
+ hci_chan_put(chan);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Confirmation callback for a logical link flow-spec modification.
+ * Currently only logs; restoring the previous flow spec on failure is
+ * still a TODO. Always returns 0.
+ */
+int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
+{
+ struct l2cap_conn *conn = chan->conn->l2cap_data;
+
+ BT_DBG("chan %p conn %p status %d", chan, conn, status);
+
+ /* TODO: if failed status restore previous fs */
+ return 0;
+}
+
+/* Called when an AMP hci_chan is being destroyed. Walks every channel
+ * on the L2CAP connection, detaches any that were using this hci_chan,
+ * and kicks off a move back to another controller via
+ * l2cap_amp_move_init(). Always returns 0.
+ * NOTE(review): hci_chan_put()/l2cap_deaggregate() are called while
+ * holding the channel-list read_lock and bh_lock_sock — assumes both
+ * are safe in atomic context; confirm against their definitions.
+ */
+int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
+{
+ struct l2cap_chan_list *l;
+ struct l2cap_conn *conn = chan->conn->l2cap_data;
+ struct sock *sk;
+
+ BT_DBG("chan %p conn %p", chan, conn);
+
+ if (!conn)
+ return 0;
+
+ l = &conn->chan_list;
+
+ read_lock(&l->lock);
+
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ bh_lock_sock(sk);
+ /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
+ if (l2cap_pi(sk)->ampchan == chan) {
+ struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
+ l2cap_pi(sk)->ampchan = NULL;
+ l2cap_pi(sk)->ampcon = NULL;
+ if (hci_chan_put(chan))
+ ampcon->l2cap_data = NULL;
+ else
+ l2cap_deaggregate(chan, l2cap_pi(sk));
+
+ l2cap_amp_move_init(sk);
+ }
+ bh_unlock_sock(sk);
+ }
+
+ read_unlock(&l->lock);
+
+ return 0;
+
+
+}
+
+/* Defer an AMP move signaling command (MOVE_CHAN_REQ/RSP/CFM/CFM_RSP)
+ * to l2cap_amp_signal_worker on the L2CAP workqueue. Clones the skb so
+ * the data pointer stored in amp_work->data stays valid after the
+ * caller's skb is freed. Returns 0 on success, -ENOMEM on failure.
+ */
+static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ u8 *data, struct sk_buff *skb)
+{
+ struct l2cap_amp_signal_work *amp_work;
+
+ amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
+ if (!amp_work)
+ return -ENOMEM;
+
+ /* FIX: "_work" was an undeclared identifier; the work item is
+ * amp_work.
+ */
+ INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
+ amp_work->conn = conn;
+ amp_work->cmd = *cmd;
+ amp_work->data = data;
+ amp_work->skb = skb_clone(skb, GFP_ATOMIC);
+ if (!amp_work->skb) {
+ kfree(amp_work);
+ return -ENOMEM;
+ }
+
+ if (!queue_work(_l2cap_wq, &amp_work->work)) {
+ kfree_skb(amp_work->skb);
+ kfree(amp_work);
+ return -ENOMEM;
+ }
 return 0;
}
@@ -3429,7 +5680,8 @@
struct hci_conn *hcon = conn->hcon;
struct l2cap_conn_param_update_req *req;
struct l2cap_conn_param_update_rsp rsp;
- u16 min, max, latency, to_multiplier, cmd_len;
+ struct sock *sk;
+ u16 min, max, latency, timeout, cmd_len;
int err;
if (!(hcon->link_mode & HCI_LM_MASTER))
@@ -3439,34 +5691,39 @@
if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
return -EPROTO;
- req = (struct l2cap_conn_param_update_req *) data;
- min = __le16_to_cpu(req->min);
- max = __le16_to_cpu(req->max);
- latency = __le16_to_cpu(req->latency);
- to_multiplier = __le16_to_cpu(req->to_multiplier);
-
- BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
- min, max, latency, to_multiplier);
-
memset(&rsp, 0, sizeof(rsp));
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
- err = l2cap_check_conn_param(min, max, latency, to_multiplier);
- if (err)
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
- else
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+ sk = l2cap_find_sock_by_fixed_cid_and_dir(4, conn->src, conn->dst, 0);
+
+ if (sk && !bt_sk(sk)->le_params.prohibit_remote_chg) {
+ req = (struct l2cap_conn_param_update_req *) data;
+ min = __le16_to_cpu(req->min);
+ max = __le16_to_cpu(req->max);
+ latency = __le16_to_cpu(req->latency);
+ timeout = __le16_to_cpu(req->to_multiplier);
+
+ err = l2cap_check_conn_param(min, max, latency, timeout);
+ if (!err) {
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+ hci_le_conn_update(hcon, min, max, latency, timeout);
+ bt_sk(sk)->le_params.interval_min = min;
+ bt_sk(sk)->le_params.interval_max = max;
+ bt_sk(sk)->le_params.latency = latency;
+ bt_sk(sk)->le_params.supervision_timeout = timeout;
+ }
+ }
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
sizeof(rsp), &rsp);
- if (!err)
- hci_le_conn_update(hcon, min, max, latency, to_multiplier);
return 0;
}
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
+ struct sk_buff *skb)
{
int err = 0;
@@ -3515,7 +5772,7 @@
break;
case L2CAP_CREATE_CHAN_REQ:
- err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+ err = l2cap_create_channel_req(conn, cmd, data);
break;
case L2CAP_CREATE_CHAN_RSP:
@@ -3523,21 +5780,11 @@
break;
case L2CAP_MOVE_CHAN_REQ:
- err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
- break;
-
case L2CAP_MOVE_CHAN_RSP:
- err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
- break;
-
case L2CAP_MOVE_CHAN_CFM:
- err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
- break;
-
case L2CAP_MOVE_CHAN_CFM_RSP:
- err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ err = l2cap_sig_amp(conn, cmd, data, skb);
break;
-
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -3594,15 +5841,16 @@
if (conn->hcon->type == LE_LINK)
err = l2cap_le_sig_cmd(conn, &cmd, data);
else
- err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
+ err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
+ data, skb);
if (err) {
- struct l2cap_cmd_rej_unk rej;
+ struct l2cap_cmd_rej rej;
BT_ERR("Wrong link type (%d)", err);
/* FIXME: Map err to a valid reason */
- rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ rej.reason = cpu_to_le16(0);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
@@ -3613,94 +5861,183 @@
kfree_skb(skb);
}
+/* Verify and strip the CRC16 FCS trailer of a received ERTM/streaming
+ * PDU. Header size depends on whether extended control fields are in
+ * use. Returns 0 if the FCS matches or FCS is disabled, -EBADMSG on a
+ * checksum mismatch.
+ */
-static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
+static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
 u16 our_fcs, rcv_fcs;
 int hdr_size;
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hdr_size = L2CAP_EXT_HDR_SIZE;
+ if (pi->extended_control)
+ hdr_size = L2CAP_EXTENDED_HDR_SIZE;
 else
- hdr_size = L2CAP_ENH_HDR_SIZE;
+ hdr_size = L2CAP_ENHANCED_HDR_SIZE;
- if (chan->fcs == L2CAP_FCS_CRC16) {
+ if (pi->fcs == L2CAP_FCS_CRC16) {
 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
- if (our_fcs != rcv_fcs)
+ if (our_fcs != rcv_fcs) {
+ BT_DBG("Bad FCS");
 return -EBADMSG;
+ }
 }
 return 0;
}
-static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
+static void l2cap_ertm_pass_to_tx(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- u32 control = 0;
-
- chan->frames_sent = 0;
-
- control |= __set_reqseq(chan, chan->buffer_seq);
-
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- l2cap_send_sframe(chan, control);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- }
-
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
- l2cap_retransmit_frames(chan);
-
- l2cap_ertm_send(chan);
-
- if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
- chan->frames_sent == 0) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
- }
+ BT_DBG("sk %p, control %p", sk, control);
+ l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
}
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
+static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- struct sk_buff *next_skb;
- int tx_seq_offset, next_tx_seq_offset;
+ BT_DBG("sk %p, control %p", sk, control);
+ l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
+}
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+static void l2cap_ertm_resend(struct sock *sk)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+ struct sk_buff *tx_skb;
+ u16 seq;
- next_skb = skb_peek(&chan->srej_q);
+ BT_DBG("sk %p", sk);
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
+ pi = l2cap_pi(sk);
- while (next_skb) {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
- return -EINVAL;
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ return;
- next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->tx_seq, chan->buffer_seq);
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ return;
- if (next_tx_seq_offset > tx_seq_offset) {
- __skb_queue_before(&chan->srej_q, next_skb, skb);
- return 0;
+ while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&pi->retrans_list);
+
+ skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
+ if (!skb) {
+ BT_DBG("Error: Can't retransmit seq %d, frame missing",
+ (int) seq);
+ continue;
}
- if (skb_queue_is_last(&chan->srej_q, next_skb))
- next_skb = NULL;
- else
- next_skb = skb_queue_next(&chan->srej_q, next_skb);
+ bt_cb(skb)->retries += 1;
+ control = bt_cb(skb)->control;
+
+ if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
+ BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ l2cap_seq_list_clear(&pi->retrans_list);
+ break;
+ }
+
+ control.reqseq = pi->buffer_seq;
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control.final = 1;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ } else {
+ control.final = 0;
+ }
+
+ if (skb_cloned(skb)) {
+ /* Cloned sk_buffs are read-only, so we need a
+ * writeable copy
+ */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ }
+
+ if (!tx_skb) {
+ l2cap_seq_list_clear(&pi->retrans_list);
+ break;
+ }
+
+ /* Update skb contents */
+ if (pi->extended_control) {
+ put_unaligned_le32(__pack_extended_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ }
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ apply_fcs(tx_skb);
+
+ sock_hold(sk);
+ tx_skb->sk = sk;
+ tx_skb->destructor = l2cap_skb_destructor;
+ atomic_inc(&pi->ertm_queued);
+
+ l2cap_do_send(sk, tx_skb);
+
+ BT_DBG("Resent txseq %d", (int)control.txseq);
+
+ pi->last_acked_seq = pi->buffer_seq;
}
-
- __skb_queue_tail(&chan->srej_q, skb);
-
- return 0;
}
-static void append_skb_frag(struct sk_buff *skb,
+/* Queue a single sequence number (from an SREJ's reqseq) for
+ * retransmission and trigger the resend machinery.
+ */
+static inline void l2cap_ertm_retransmit(struct sock *sk,
+ struct bt_l2cap_control *control)
+{
+ BT_DBG("sk %p, control %p", sk, control);
+
+ l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
+ l2cap_ertm_resend(sk);
+}
+
+/* Queue every unacked I-frame from control->reqseq up to (but not
+ * including) sk_send_head for retransmission, e.g. in response to a
+ * REJ or a poll. No-op while the remote is busy; a set poll bit
+ * requests the F-bit on the next retransmitted frame.
+ */
+static void l2cap_ertm_retransmit_all(struct sock *sk,
+ struct bt_l2cap_control *control)
+{
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+
+ BT_DBG("sk %p, control %p", sk, control);
+
+ pi = l2cap_pi(sk);
+
+ if (control->poll)
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+ l2cap_seq_list_clear(&pi->retrans_list);
+
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ return;
+
+ if (pi->unacked_frames) {
+ /* Find the first frame to retransmit, then append all
+ * txseqs from there to the end of the sent window.
+ */
+ skb_queue_walk(TX_QUEUE(sk), skb) {
+ if ((bt_cb(skb)->control.txseq == control->reqseq) ||
+ skb == sk->sk_send_head)
+ break;
+ }
+
+ skb_queue_walk_from(TX_QUEUE(sk), skb) {
+ if (skb == sk->sk_send_head)
+ break;
+
+ l2cap_seq_list_append(&pi->retrans_list,
+ bt_cb(skb)->control.txseq);
+ }
+
+ l2cap_ertm_resend(sk);
+ }
+}
+
+static inline void append_skb_frag(struct sk_buff *skb,
struct sk_buff *new_frag, struct sk_buff **last_frag)
{
/* skb->len reflects data in skb as well as all fragments
- * skb->data_len reflects only data in fragments
+ skb->data_len reflects only data in fragments
*/
+ BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);
+
if (!skb_has_frag_list(skb))
skb_shinfo(skb)->frag_list = new_frag;
@@ -3714,651 +6051,1147 @@
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
+static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
+ struct bt_l2cap_control *control, struct sk_buff *skb)
{
+ struct l2cap_pinfo *pi;
int err = -EINVAL;
- switch (__get_ctrl_sar(chan, control)) {
- case L2CAP_SAR_UNSEGMENTED:
- if (chan->sdu)
- break;
+ BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
+ skb, skb->len, skb->truesize);
- err = chan->ops->recv(chan->data, skb);
+ if (!control)
+ return err;
+
+ pi = l2cap_pi(sk);
+
+ BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
+ control->frame_type, control->sar, control->txseq,
+ control->reqseq, control->final);
+
+ switch (control->sar) {
+ case L2CAP_SAR_UNSEGMENTED:
+ if (pi->sdu) {
+ BT_DBG("Unexpected unsegmented PDU during reassembly");
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
+ }
+
+ BT_DBG("Unsegmented");
+ err = sock_queue_rcv_skb(sk, skb);
break;
case L2CAP_SAR_START:
- if (chan->sdu)
- break;
+ if (pi->sdu) {
+ BT_DBG("Unexpected start PDU during reassembly");
+ kfree_skb(pi->sdu);
+ }
- chan->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, L2CAP_SDULEN_SIZE);
+ pi->sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, 2);
- if (chan->sdu_len > chan->imtu) {
+ if (pi->sdu_len > pi->imtu) {
err = -EMSGSIZE;
break;
}
- if (skb->len >= chan->sdu_len)
+ if (skb->len >= pi->sdu_len)
break;
- chan->sdu = skb;
- chan->sdu_last_frag = skb;
+ pi->sdu = skb;
+ pi->sdu_last_frag = skb;
+
+ BT_DBG("Start");
skb = NULL;
err = 0;
break;
case L2CAP_SAR_CONTINUE:
- if (!chan->sdu)
+ if (!pi->sdu)
break;
- append_skb_frag(chan->sdu, skb,
- &chan->sdu_last_frag);
+ append_skb_frag(pi->sdu, skb,
+ &pi->sdu_last_frag);
skb = NULL;
- if (chan->sdu->len >= chan->sdu_len)
+ if (pi->sdu->len >= pi->sdu_len)
break;
+ BT_DBG("Continue, reassembled %d", pi->sdu->len);
+
err = 0;
break;
case L2CAP_SAR_END:
- if (!chan->sdu)
+ if (!pi->sdu)
break;
- append_skb_frag(chan->sdu, skb,
- &chan->sdu_last_frag);
+ append_skb_frag(pi->sdu, skb,
+ &pi->sdu_last_frag);
skb = NULL;
- if (chan->sdu->len != chan->sdu_len)
+ if (pi->sdu->len != pi->sdu_len)
break;
- err = chan->ops->recv(chan->data, chan->sdu);
+ BT_DBG("End, reassembled %d", pi->sdu->len);
+ /* If the sender used tiny PDUs, the rcv queuing could fail.
+ * Applications that have issues here should use a larger
+ * sk_rcvbuf.
+ */
+ err = sock_queue_rcv_skb(sk, pi->sdu);
if (!err) {
/* Reassembly complete */
- chan->sdu = NULL;
- chan->sdu_last_frag = NULL;
- chan->sdu_len = 0;
+ pi->sdu = NULL;
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
}
break;
+
+ default:
+ BT_DBG("Bad SAR value");
+ break;
}
if (err) {
- kfree_skb(skb);
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
- chan->sdu_last_frag = NULL;
- chan->sdu_len = 0;
+ BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
+ err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
+ if (pi->sdu) {
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+ }
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
+ if (skb)
+ kfree_skb(skb);
+ }
+
+ /* Update local busy state */
+ if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);
+
+ return err;
+}
+
+/* Drain in-sequence I-frames from the SREJ (out-of-order) queue into
+ * the receive path, advancing buffer_seq until a gap is hit or receive
+ * memory runs out. When the SREJ queue empties, drop back to the
+ * normal RECV state and acknowledge. Returns the last delivery error.
+ */
+static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
+{
+ int err = 0;
+ /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
+ * until a gap is encountered.
+ */
+
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+ pi = l2cap_pi(sk);
+
+ while (l2cap_rmem_available(sk)) {
+ struct sk_buff *skb;
+ BT_DBG("Searching for skb with txseq %d (queue len %d)",
+ (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
+
+ skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
+
+ if (!skb)
+ break;
+
+ skb_unlink(skb, SREJ_QUEUE(sk));
+ pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
+ err = l2cap_ertm_rx_expected_iframe(sk,
+ &bt_cb(skb)->control, skb);
+ if (err)
+ break;
+ }
+
+ if (skb_queue_empty(SREJ_QUEUE(sk))) {
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ l2cap_ertm_send_ack(sk);
 }
 return err;
}
-static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
+static void l2cap_ertm_handle_srej(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- BT_DBG("chan %p, Enter local busy", chan);
-
- set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
-
- __set_ack_timer(chan);
-}
-
-static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
-{
- u32 control;
-
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- goto done;
-
- control = __set_reqseq(chan, chan->buffer_seq);
- control |= __set_ctrl_poll(chan);
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
- chan->retry_count = 1;
-
- __clear_retrans_timer(chan);
- __set_monitor_timer(chan);
-
- set_bit(CONN_WAIT_F, &chan->conn_state);
-
-done:
- clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- clear_bit(CONN_RNR_SENT, &chan->conn_state);
-
- BT_DBG("chan %p, Exit local busy", chan);
-}
-
-void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
-{
- if (chan->mode == L2CAP_MODE_ERTM) {
- if (busy)
- l2cap_ertm_enter_local_busy(chan);
- else
- l2cap_ertm_exit_local_busy(chan);
- }
-}
-
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
-{
+ struct l2cap_pinfo *pi;
struct sk_buff *skb;
- u32 control;
- while ((skb = skb_peek(&chan->srej_q)) &&
- !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- int err;
+ BT_DBG("sk %p, control %p", sk, control);
- if (bt_cb(skb)->tx_seq != tx_seq)
- break;
+ pi = l2cap_pi(sk);
- skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
- err = l2cap_reassemble_sdu(chan, skb, control);
-
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- break;
- }
-
- chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
- tx_seq = __next_seq(chan, tx_seq);
- }
-}
-
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
-{
- struct srej_list *l, *tmp;
- u32 control;
-
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- list_del(&l->list);
- kfree(l);
- return;
- }
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_reqseq(chan, l->tx_seq);
- l2cap_send_sframe(chan, control);
- list_del(&l->list);
- list_add_tail(&l->list, &chan->srej_l);
- }
-}
-
-static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
-{
- struct srej_list *new;
- u32 control;
-
- while (tx_seq != chan->expected_tx_seq) {
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_reqseq(chan, chan->expected_tx_seq);
- l2cap_send_sframe(chan, control);
-
- new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
- if (!new)
- return -ENOMEM;
-
- new->tx_seq = chan->expected_tx_seq;
-
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
-
- list_add_tail(&new->list, &chan->srej_l);
- }
-
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
-
- return 0;
-}
-
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
-{
- u16 tx_seq = __get_txseq(chan, rx_control);
- u16 req_seq = __get_reqseq(chan, rx_control);
- u8 sar = __get_ctrl_sar(chan, rx_control);
- int tx_seq_offset, expected_tx_seq_offset;
- int num_to_ack = (chan->tx_win/6) + 1;
- int err = 0;
-
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
- tx_seq, rx_control);
-
- if (__is_ctrl_final(chan, rx_control) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
- }
-
- chan->expected_ack_seq = req_seq;
- l2cap_drop_acked_frames(chan);
-
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
-
- /* invalid tx_seq */
- if (tx_seq_offset >= chan->tx_win) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- goto drop;
- }
-
- if (tx_seq == chan->expected_tx_seq)
- goto expected;
-
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- struct srej_list *first;
-
- first = list_first_entry(&chan->srej_l,
- struct srej_list, list);
- if (tx_seq == first->tx_seq) {
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
- l2cap_check_srej_gap(chan, tx_seq);
-
- list_del(&first->list);
- kfree(first);
-
- if (list_empty(&chan->srej_l)) {
- chan->buffer_seq = chan->buffer_seq_srej;
- clear_bit(CONN_SREJ_SENT, &chan->conn_state);
- l2cap_send_ack(chan);
- BT_DBG("chan %p, Exit SREJ_SENT", chan);
- }
- } else {
- struct srej_list *l;
-
- /* duplicated tx_seq */
- if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
- goto drop;
-
- list_for_each_entry(l, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- l2cap_resend_srejframe(chan, tx_seq);
- return 0;
- }
- }
-
- err = l2cap_send_srejframe(chan, tx_seq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, -err);
- return err;
- }
- }
- } else {
- expected_tx_seq_offset = __seq_offset(chan,
- chan->expected_tx_seq, chan->buffer_seq);
-
- /* duplicated tx_seq */
- if (tx_seq_offset < expected_tx_seq_offset)
- goto drop;
-
- set_bit(CONN_SREJ_SENT, &chan->conn_state);
-
- BT_DBG("chan %p, Enter SREJ", chan);
-
- INIT_LIST_HEAD(&chan->srej_l);
- chan->buffer_seq_srej = chan->buffer_seq;
-
- __skb_queue_head_init(&chan->srej_q);
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
-
- /* Set P-bit only if there are some I-frames to ack. */
- if (__clear_ack_timer(chan))
- set_bit(CONN_SEND_PBIT, &chan->conn_state);
-
- err = l2cap_send_srejframe(chan, tx_seq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, -err);
- return err;
- }
- }
- return 0;
-
-expected:
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
-
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
- }
-
- err = l2cap_reassemble_sdu(chan, skb, rx_control);
- chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
-
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- return err;
- }
-
- if (__is_ctrl_final(chan, rx_control)) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- }
-
-
- chan->num_acked = (chan->num_acked + 1) % num_to_ack;
- if (chan->num_acked == num_to_ack - 1)
- l2cap_send_ack(chan);
- else
- __set_ack_timer(chan);
-
- return 0;
-
-drop:
- kfree_skb(skb);
- return 0;
-}
-
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
-{
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
- __get_reqseq(chan, rx_control), rx_control);
-
- chan->expected_ack_seq = __get_reqseq(chan, rx_control);
- l2cap_drop_acked_frames(chan);
-
- if (__is_ctrl_poll(chan, rx_control)) {
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
- __set_retrans_timer(chan);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- l2cap_send_srejtail(chan);
- } else {
- l2cap_send_i_or_rr_or_rnr(chan);
- }
-
- } else if (__is_ctrl_final(chan, rx_control)) {
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
-
- } else {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
- __set_retrans_timer(chan);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- else
- l2cap_ertm_send(chan);
- }
-}
-
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
-{
- u16 tx_seq = __get_reqseq(chan, rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- if (__is_ctrl_final(chan, rx_control)) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- } else {
- l2cap_retransmit_frames(chan);
-
- if (test_bit(CONN_WAIT_F, &chan->conn_state))
- set_bit(CONN_REJ_ACT, &chan->conn_state);
- }
-}
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
-{
- u16 tx_seq = __get_reqseq(chan, rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (__is_ctrl_poll(chan, rx_control)) {
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- l2cap_retransmit_one_frame(chan, tx_seq);
-
- l2cap_ertm_send(chan);
-
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
- }
- } else if (__is_ctrl_final(chan, rx_control)) {
- if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
- chan->srej_save_reqseq == tx_seq)
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- else
- l2cap_retransmit_one_frame(chan, tx_seq);
- } else {
- l2cap_retransmit_one_frame(chan, tx_seq);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
- }
- }
-}
-
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
-{
- u16 tx_seq = __get_reqseq(chan, rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- if (__is_ctrl_poll(chan, rx_control))
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
-
- if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- __clear_retrans_timer(chan);
- if (__is_ctrl_poll(chan, rx_control))
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
+ if (control->reqseq == pi->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting",
+ (int) control->reqseq);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
return;
}
- if (__is_ctrl_poll(chan, rx_control)) {
- l2cap_send_srejtail(chan);
+ skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
+
+ if (skb == NULL) {
+ BT_DBG("Seq %d not available for retransmission",
+ (int) control->reqseq);
+ return;
+ }
+
+ if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
+ BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ return;
+ }
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (control->poll) {
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_retransmit(sk, control);
+ l2cap_ertm_send(sk);
+
+ if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ pi->srej_save_reqseq = control->reqseq;
+ }
} else {
- rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, rx_control);
+ l2cap_ertm_pass_to_tx_fbit(sk, control);
+
+ if (control->final) {
+ if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
+ (pi->srej_save_reqseq == control->reqseq)) {
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ } else {
+ l2cap_ertm_retransmit(sk, control);
+ }
+ } else {
+ l2cap_ertm_retransmit(sk, control);
+ if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ pi->srej_save_reqseq = control->reqseq;
+ }
+ }
}
}
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
+static void l2cap_ertm_handle_rej(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
- if (__is_ctrl_final(chan, rx_control) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
+ BT_DBG("sk %p, control %p", sk, control);
+
+ pi = l2cap_pi(sk);
+
+ if (control->reqseq == pi->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting",
+ (int) control->reqseq);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ return;
}
- switch (__get_ctrl_super(chan, rx_control)) {
- case L2CAP_SUPER_RR:
- l2cap_data_channel_rrframe(chan, rx_control);
- break;
+ skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
- case L2CAP_SUPER_REJ:
- l2cap_data_channel_rejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_SREJ:
- l2cap_data_channel_srejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_RNR:
- l2cap_data_channel_rnrframe(chan, rx_control);
- break;
+ if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ return;
}
- kfree_skb(skb);
- return 0;
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ if (control->final) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ l2cap_ertm_retransmit_all(sk, control);
+ } else {
+ l2cap_ertm_retransmit_all(sk, control);
+ l2cap_ertm_send(sk);
+ if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
+ pi->conn_state |= L2CAP_CONN_REJ_ACT;
+ }
}
-static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
- u32 control;
- u16 req_seq;
- int len, next_tx_seq_offset, req_seq_offset;
+ struct l2cap_pinfo *pi;
- control = __get_control(chan, skb->data);
- skb_pull(skb, __ctrl_size(chan));
- len = skb->len;
+ BT_DBG("sk %p, txseq %d", sk, (int)txseq);
+ pi = l2cap_pi(sk);
- /*
- * We can just drop the corrupted I-frame here.
- * Receiver will miss it and start proper recovery
- * procedures and ask retransmission.
- */
- if (l2cap_check_fcs(chan, skb))
- goto drop;
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
+ (int)pi->expected_tx_seq);
- if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
- len -= L2CAP_SDULEN_SIZE;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- len -= L2CAP_FCS_SIZE;
-
- if (len > chan->mps) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- req_seq = __get_reqseq(chan, control);
-
- req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
-
- next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
- chan->expected_ack_seq);
-
- /* check for invalid req-seq */
- if (req_seq_offset > next_tx_seq_offset) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- if (!__is_sframe(chan, control)) {
- if (len < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ /* See notes below regarding "double poll" and
+ * invalid packets.
+ */
+ if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside "
+ "tx window after SREJ sent");
+ return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - bad txseq within tx "
+ "window after SREJ sent");
+ return L2CAP_ERTM_TXSEQ_INVALID;
+ }
}
- l2cap_data_channel_iframe(chan, control, skb);
- } else {
- if (len != 0) {
- BT_ERR("%d", len);
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ if (pi->srej_list.head == txseq) {
+ BT_DBG("Expected SREJ");
+ return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
}
- l2cap_data_channel_sframe(chan, control, skb);
+ if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
+ BT_DBG("Duplicate SREJ - txseq already stored");
+ return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
+ }
+
+ if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - txseq not requested "
+ "with SREJ");
+ return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
+ }
}
- return 0;
+ if (pi->expected_tx_seq == txseq) {
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_ERTM_TXSEQ_INVALID;
+ } else {
+ BT_DBG("Expected");
+ return L2CAP_ERTM_TXSEQ_EXPECTED;
+ }
+ }
-drop:
- kfree_skb(skb);
- return 0;
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) <
+ __delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
+ BT_DBG("Duplicate - expected_tx_seq later than txseq");
+ return L2CAP_ERTM_TXSEQ_DUPLICATE;
+ }
+
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ /* A source of invalid packets is a "double poll" condition,
+ * where delays cause us to send multiple poll packets. If
+ * the remote stack receives and processes both polls,
+ * sequence numbers can wrap around in such a way that a
+ * resent frame has a sequence number that looks like new data
+ * with a sequence gap. This would trigger an erroneous SREJ
+ * request.
+ *
+ * Fortunately, this is impossible with a tx window that's
+ * less than half of the maximum sequence number, which allows
+ * invalid frames to be safely ignored.
+ *
+ * With tx window sizes greater than half of the tx window
+ * maximum, the frame is invalid and cannot be ignored. This
+ * causes a disconnect.
+ */
+
+ if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside tx window");
+ return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_ERTM_TXSEQ_INVALID;
+ }
+ } else {
+ BT_DBG("Unexpected - txseq indicates missing frames");
+ return L2CAP_ERTM_TXSEQ_UNEXPECTED;
+ }
}
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
+static int l2cap_ertm_rx_state_recv(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
{
- struct l2cap_chan *chan;
- u32 control;
- u16 tx_seq;
- int len;
+ struct l2cap_pinfo *pi;
+ int err = 0;
+ bool skb_in_use = 0;
- chan = l2cap_get_chan_by_scid(conn, cid);
- if (!chan) {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
+ BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_RECV_IFRAME:
+ switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
+ case L2CAP_ERTM_TXSEQ_EXPECTED:
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
+ }
+
+ pi->expected_tx_seq = __next_seq(control->txseq, pi);
+ pi->buffer_seq = pi->expected_tx_seq;
+ skb_in_use = 1;
+
+ err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ if (err)
+ break;
+
+ if (control->final) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else {
+ control->final = 0;
+ l2cap_ertm_retransmit_all(sk, control);
+ l2cap_ertm_send(sk);
+ }
+ }
+
+ if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
+ l2cap_ertm_send_ack(sk);
+ break;
+ case L2CAP_ERTM_TXSEQ_UNEXPECTED:
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ /* Can't issue SREJ frames in the local busy state.
+ * Drop this frame, it will be seen as missing
+ * when local busy is exited.
+ */
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ BT_DBG("Busy, discarding unexpected seq %d",
+ control->txseq);
+ break;
+ }
+
+ /* There was a gap in the sequence, so an SREJ
+ * must be sent for each missing frame. The
+ * current frame is stored for later use.
+ */
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ l2cap_seq_list_clear(&pi->srej_list);
+ l2cap_ertm_send_srej(sk, control->txseq);
+
+ pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
+ break;
+ case L2CAP_ERTM_TXSEQ_DUPLICATE:
+ l2cap_ertm_pass_to_tx(sk, control);
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RR:
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control->final) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
+ pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_PREPARE) {
+ control->final = 0;
+ l2cap_ertm_retransmit_all(sk, control);
+ }
+
+ l2cap_ertm_send(sk);
+ } else if (control->poll) {
+ l2cap_ertm_send_i_or_rr_or_rnr(sk);
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ pi->unacked_frames)
+ l2cap_ertm_start_retrans_timer(pi);
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_send(sk);
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RNR:
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control && control->poll) {
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_send_rr_or_rnr(sk, 0);
+ }
+ l2cap_ertm_stop_retrans_timer(pi);
+ l2cap_seq_list_clear(&pi->retrans_list);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_REJ:
+ l2cap_ertm_handle_rej(sk, control);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_SREJ:
+ l2cap_ertm_handle_srej(sk, control);
+ break;
+ default:
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
kfree_skb(skb);
- return 0;
}
- l2cap_chan_lock(chan);
+ return err;
+}
- BT_DBG("chan %p, len %d", chan, skb->len);
+static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+ u16 txseq = control->txseq;
+ bool skb_in_use = 0;
- if (chan->state != BT_CONNECTED)
+ BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_RECV_IFRAME:
+ switch (l2cap_ertm_classify_txseq(sk, txseq)) {
+ case L2CAP_ERTM_TXSEQ_EXPECTED:
+ /* Keep frame for reassembly later */
+ l2cap_ertm_pass_to_tx(sk, control);
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ pi->expected_tx_seq = __next_seq(txseq, pi);
+ break;
+ case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
+ l2cap_seq_list_pop(&pi->srej_list);
+
+ l2cap_ertm_pass_to_tx(sk, control);
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ err = l2cap_ertm_rx_queued_iframes(sk);
+ if (err)
+ break;
+
+ break;
+ case L2CAP_ERTM_TXSEQ_UNEXPECTED:
+ /* Got a frame that can't be reassembled yet.
+ * Save it for later, and send SREJs to cover
+ * the missing frames.
+ */
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_ertm_send_srej(sk, control->txseq);
+ break;
+ case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
+ /* This frame was requested with an SREJ, but
+ * some expected retransmitted frames are
+ * missing. Request retransmission of missing
+ * SREJ'd frames.
+ */
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_ertm_send_srej_list(sk, control->txseq);
+ break;
+ case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
+ /* We've already queued this frame. Drop this copy. */
+ l2cap_ertm_pass_to_tx(sk, control);
+ break;
+ case L2CAP_ERTM_TXSEQ_DUPLICATE:
+ /* Expecting a later sequence number, so this frame
+ * was already received. Ignore it completely.
+ */
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RR:
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control->final) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else {
+ control->final = 0;
+ l2cap_ertm_retransmit_all(sk, control);
+ }
+
+ l2cap_ertm_send(sk);
+ } else if (control->poll) {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ pi->unacked_frames) {
+ l2cap_ertm_start_retrans_timer(pi);
+ }
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_send_srej_tail(sk);
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ pi->unacked_frames) {
+ l2cap_ertm_start_retrans_timer(pi);
+ }
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_send_ack(sk);
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RNR:
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control->poll)
+ l2cap_ertm_send_srej_tail(sk);
+ else {
+ struct bt_l2cap_control rr_control;
+ memset(&rr_control, 0, sizeof(rr_control));
+ rr_control.frame_type = 's';
+ rr_control.super = L2CAP_SFRAME_RR;
+ rr_control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &rr_control);
+ }
+
+ break;
+ case L2CAP_ERTM_EVENT_RECV_REJ:
+ l2cap_ertm_handle_rej(sk, control);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_SREJ:
+ l2cap_ertm_handle_srej(sk, control);
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+ bool skb_in_use = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ /* Only handle expected frames, to avoid state changes. */
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_RECV_IFRAME:
+ if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
+ L2CAP_ERTM_TXSEQ_EXPECTED) {
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
+ }
+
+ pi->expected_tx_seq = __next_seq(control->txseq, pi);
+ pi->buffer_seq = pi->expected_tx_seq;
+ skb_in_use = 1;
+
+ err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ if (err)
+ break;
+
+ if (control->final) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ control->final = 0;
+ }
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RR:
+ case L2CAP_ERTM_EVENT_RECV_RNR:
+ case L2CAP_ERTM_EVENT_RECV_REJ:
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_SREJ:
+ /* Ignore */
+ break;
+ default:
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_answer_move_poll(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control control;
+ int err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);
+
+ if (!skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head = skb_peek(TX_QUEUE(sk));
+ else
+ sk->sk_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ pi->next_tx_seq = pi->amp_move_reqseq;
+ pi->unacked_frames = 0;
+
+ err = l2cap_finish_amp_move(sk);
+
+ if (err)
+ return err;
+
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_send_i_or_rr_or_rnr(sk);
+
+ memset(&control, 0, sizeof(control));
+ control.reqseq = pi->amp_move_reqseq;
+
+ if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
+ err = -EPROTO;
+ else
+ err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
+ pi->amp_move_event);
+
+ return err;
+}
+
+static void l2cap_amp_move_setup(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ l2cap_ertm_stop_ack_timer(pi);
+ l2cap_ertm_stop_retrans_timer(pi);
+ l2cap_ertm_stop_monitor_timer(pi);
+
+ pi->retry_count = 0;
+ skb_queue_walk(TX_QUEUE(sk), skb) {
+ if (bt_cb(skb)->retries)
+ bt_cb(skb)->retries = 1;
+ else
+ break;
+ }
+
+ pi->expected_tx_seq = pi->buffer_seq;
+
+ pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
+ l2cap_seq_list_clear(&pi->retrans_list);
+ l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
+ skb_queue_purge(SREJ_QUEUE(sk));
+
+ pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
+ pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
+
+ BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
+ pi->rx_state);
+
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+}
+
+static void l2cap_amp_move_revert(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
+ } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
+}
+
+static int l2cap_amp_move_reconf(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ u8 buf[64];
+ int err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
+ l2cap_build_amp_reconf_req(sk, buf), buf);
+ return err;
+}
+
+static void l2cap_amp_move_success(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ int err = 0;
+ /* Send reconfigure request */
+ if (pi->mode == L2CAP_MODE_ERTM) {
+ pi->reconf_state = L2CAP_RECONF_INT;
+ if (enable_reconfig)
+ err = l2cap_amp_move_reconf(sk);
+
+ if (err || !enable_reconfig) {
+ pi->reconf_state = L2CAP_RECONF_NONE;
+ l2cap_ertm_tx(sk, NULL, NULL,
+ L2CAP_ERTM_EVENT_EXPLICIT_POLL);
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
+ }
+ } else
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ if (pi->mode == L2CAP_MODE_ERTM)
+ pi->rx_state =
+ L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
+ else
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ }
+}
+
+static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
+{
+ /* Make sure reqseq is for a packet that has been sent but not acked */
+ u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
+ return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
+}
+
+static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
+ struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, state %d",
+ sk, control, skb, l2cap_pi(sk)->rx_state);
+
+ pi = l2cap_pi(sk);
+
+ if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
+ L2CAP_ERTM_TXSEQ_EXPECTED) {
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
+ __next_seq(pi->buffer_seq, pi));
+
+ pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
+
+ l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ } else {
+ if (pi->sdu) {
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+ }
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
+
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+ }
+
+ pi->last_acked_seq = control->txseq;
+ pi->expected_tx_seq = __next_seq(control->txseq, pi);
+
+ return err;
+}
+
+static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
+ sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);
+
+ pi = l2cap_pi(sk);
+
+ if (__valid_reqseq(pi, control->reqseq)) {
+ switch (pi->rx_state) {
+ case L2CAP_ERTM_RX_STATE_RECV:
+ err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
+ break;
+ case L2CAP_ERTM_RX_STATE_SREJ_SENT:
+ err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
+ event);
+ break;
+ case L2CAP_ERTM_RX_STATE_AMP_MOVE:
+ err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
+ event);
+ break;
+ case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
+ if (control->final) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+
+ if (!skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head =
+ skb_peek(TX_QUEUE(sk));
+ else
+ sk->sk_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ pi->next_tx_seq = control->reqseq;
+ pi->unacked_frames = 0;
+
+ if (pi->ampcon)
+ pi->conn->mtu =
+ pi->ampcon->hdev->acl_mtu;
+ else
+ pi->conn->mtu =
+ pi->conn->hcon->hdev->acl_mtu;
+
+ err = l2cap_setup_resegment(sk);
+
+ if (err)
+ break;
+
+ err = l2cap_ertm_rx_state_recv(sk, control, skb,
+ event);
+ }
+ break;
+ case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
+ if (control->poll) {
+ pi->amp_move_reqseq = control->reqseq;
+ pi->amp_move_event = event;
+ err = l2cap_answer_move_poll(sk);
+ }
+ break;
+ case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
+ if (control->poll) {
+ pi->amp_move_reqseq = control->reqseq;
+ pi->amp_move_event = event;
+
+ BT_DBG("amp_move_role 0x%2.2x, "
+ "reconf_state 0x%2.2x",
+ pi->amp_move_role, pi->reconf_state);
+
+ if (pi->reconf_state == L2CAP_RECONF_ACC)
+ err = l2cap_amp_move_reconf(sk);
+ else
+ err = l2cap_answer_move_poll(sk);
+ }
+ break;
+ default:
+ /* shut it down */
+ break;
+ }
+ } else {
+ BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
+ control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ }
+
+ return err;
+}
+
+void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
+{
+ lock_sock(sk);
+
+ l2cap_pi(sk)->fixed_channel = 1;
+
+ l2cap_pi(sk)->imtu = opt->imtu;
+ l2cap_pi(sk)->omtu = opt->omtu;
+ l2cap_pi(sk)->remote_mps = opt->omtu;
+ l2cap_pi(sk)->mps = opt->omtu;
+ l2cap_pi(sk)->flush_to = opt->flush_to;
+ l2cap_pi(sk)->mode = opt->mode;
+ l2cap_pi(sk)->fcs = opt->fcs;
+ l2cap_pi(sk)->max_tx = opt->max_tx;
+ l2cap_pi(sk)->remote_max_tx = opt->max_tx;
+ l2cap_pi(sk)->tx_win = opt->txwin_size;
+ l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
+ l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ if (opt->mode == L2CAP_MODE_ERTM ||
+ l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
+
+ release_sock(sk);
+
+ return;
+}
+
+static const u8 l2cap_ertm_rx_func_to_event[4] = {
+ L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
+ L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
+};
+
+int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control *control;
+ u16 len;
+ u8 event;
+ pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);
+
+ if (sk->sk_state != BT_CONNECTED)
goto drop;
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_BASIC:
/* If socket recv buffers overflows we drop data here
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide flow control mechanism. */
- if (chan->imtu < skb->len)
+ if (pi->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
+ if (!sock_queue_rcv_skb(sk, skb))
goto done;
break;
case L2CAP_MODE_ERTM:
- l2cap_ertm_data_rcv(chan, skb);
-
- goto done;
-
case L2CAP_MODE_STREAMING:
- control = __get_control(chan, skb->data);
- skb_pull(skb, __ctrl_size(chan));
- len = skb->len;
-
- if (l2cap_check_fcs(chan, skb))
- goto drop;
-
- if (__is_sar_start(chan, control))
- len -= L2CAP_SDULEN_SIZE;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- len -= L2CAP_FCS_SIZE;
-
- if (len > chan->mps || len < 0 || __is_sframe(chan, control))
- goto drop;
-
- tx_seq = __get_txseq(chan, control);
-
- if (chan->expected_tx_seq != tx_seq) {
- /* Frame(s) missing - must discard partial SDU */
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
- chan->sdu_last_frag = NULL;
- chan->sdu_len = 0;
-
- /* TODO: Notify userland of missing data */
+ control = &bt_cb(skb)->control;
+ if (pi->extended_control) {
+ __get_extended_control(get_unaligned_le32(skb->data),
+ control);
+ skb_pull(skb, 4);
+ } else {
+ __get_enhanced_control(get_unaligned_le16(skb->data),
+ control);
+ skb_pull(skb, 2);
}
- chan->expected_tx_seq = __next_seq(chan, tx_seq);
+ len = skb->len;
- if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ if (l2cap_check_fcs(pi, skb))
+ goto drop;
+
+ if ((control->frame_type == 'i') &&
+ (control->sar == L2CAP_SAR_START))
+ len -= 2;
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ len -= 2;
+
+ /*
+ * We can just drop the corrupted I-frame here.
+ * Receiver will miss it and start proper recovery
+ * procedures and ask for retransmission.
+ */
+ if (len > pi->mps) {
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ goto drop;
+ }
+
+ if (control->frame_type == 'i') {
+
+ int err;
+
+ BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+ control->sar, control->reqseq, control->final,
+ control->txseq);
+
+ /* Validate F-bit - F=0 always valid, F=1 only
+ * valid in TX WAIT_F
+ */
+ if (control->final && (pi->tx_state !=
+ L2CAP_ERTM_TX_STATE_WAIT_F))
+ goto drop;
+
+ if (pi->mode != L2CAP_MODE_STREAMING) {
+ event = L2CAP_ERTM_EVENT_RECV_IFRAME;
+ err = l2cap_ertm_rx(sk, control, skb, event);
+ } else
+ err = l2cap_strm_rx(sk, control, skb);
+ if (err)
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ } else {
+ /* Only I-frames are expected in streaming mode */
+ if (pi->mode == L2CAP_MODE_STREAMING)
+ goto drop;
+
+ BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+ control->reqseq, control->final, control->poll,
+ control->super);
+
+ if (len != 0) {
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ goto drop;
+ }
+
+ /* Validate F and P bits */
+ if (control->final &&
+ ((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
+ || control->poll))
+ goto drop;
+
+ event = l2cap_ertm_rx_func_to_event[control->super];
+ if (l2cap_ertm_rx(sk, control, skb, event))
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ }
goto done;
default:
- BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
+ BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
break;
}
@@ -4366,64 +7199,128 @@
kfree_skb(skb);
done:
- l2cap_chan_unlock(chan);
-
return 0;
}
+void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
+{
+ lock_sock(sk);
+ l2cap_data_channel(sk, skb);
+ release_sock(sk);
+}
+
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
- struct l2cap_chan *chan;
+ struct sock *sk;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src);
- if (!chan)
+ sk = l2cap_get_sock_by_psm(0, psm, conn->src);
+ if (!sk)
goto drop;
- BT_DBG("chan %p, len %d", chan, skb->len);
+ bh_lock_sock(sk);
- if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ BT_DBG("sk %p, len %d", sk, skb->len);
+
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
goto drop;
- if (chan->imtu < skb->len)
+ if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
- return 0;
+ if (!sock_queue_rcv_skb(sk, skb))
+ goto done;
drop:
kfree_skb(skb);
+done:
+ if (sk)
+ bh_unlock_sock(sk);
return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
+static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid,
+ struct sk_buff *skb)
{
- struct l2cap_chan *chan;
+ struct sock *sk;
+ struct sk_buff *skb_rsp;
+ struct l2cap_hdr *lh;
+ int dir;
+ u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
+ u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
+ L2CAP_ATT_NOT_SUPPORTED};
- chan = l2cap_global_chan_by_scid(0, cid, conn->src);
- if (!chan)
+ dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1;
+
+ sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src,
+ conn->dst, dir);
+
+ BT_DBG("sk %p, dir:%d", sk, dir);
+
+ if (!sk)
goto drop;
- BT_DBG("chan %p, len %d", chan, skb->len);
+ bh_lock_sock(sk);
- if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ BT_DBG("sk %p, len %d", sk, skb->len);
+
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
goto drop;
- if (chan->imtu < skb->len)
+ if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
- return 0;
+ if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
+ skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
+ GFP_ATOMIC);
+ if (!skb_rsp)
+ goto drop;
+
+ lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(mtu_rsp));
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+ memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
+ sizeof(mtu_rsp));
+ hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
+
+ goto free_skb;
+ }
+
+ if (!sock_queue_rcv_skb(sk, skb))
+ goto done;
drop:
+ if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
+ skb->data[0] != L2CAP_ATT_INDICATE)
+ goto free_skb;
+
+ /* If this is an incoming PDU that requires a response, respond with
+ * a generic error so remote device doesn't hang */
+
+ skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
+ if (!skb_rsp)
+ goto free_skb;
+
+ lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(err_rsp));
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+ err_rsp[1] = skb->data[0];
+ memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
+ hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
+
+free_skb:
kfree_skb(skb);
+done:
+ if (sk)
+ bh_unlock_sock(sk);
return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_hdr *lh = (void *) skb->data;
+ struct sock *sk;
u16 cid, len;
__le16 psm;
@@ -4456,204 +7353,244 @@
case L2CAP_CID_SMP:
if (smp_sig_channel(conn, skb))
- l2cap_conn_del(conn->hcon, EACCES);
+ l2cap_conn_del(conn->hcon, EACCES, 0);
break;
default:
- l2cap_data_channel(conn, cid, skb);
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
+ if (sk) {
+ if (sock_owned_by_user(sk)) {
+ BT_DBG("backlog sk %p", sk);
+ if (sk_add_backlog(sk, skb))
+ kfree_skb(skb);
+ } else
+ l2cap_data_channel(sk, skb);
+
+ bh_unlock_sock(sk);
+ } else if (cid == L2CAP_CID_A2MP) {
+ BT_DBG("A2MP");
+ amp_conn_ind(conn->hcon, skb);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ kfree_skb(skb);
+ }
+
break;
}
}
/* ---- L2CAP interface with lower layer (HCI) ---- */
-int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
+static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
int exact = 0, lm1 = 0, lm2 = 0;
- struct l2cap_chan *c;
+ register struct sock *sk;
+ struct hlist_node *node;
+
+ if (type != ACL_LINK)
+ return 0;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
- read_lock(&chan_list_lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
- if (c->state != BT_LISTEN)
+ read_lock(&l2cap_sk_list.lock);
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
+ if (l2cap_pi(sk)->role_switch)
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
+ if (l2cap_pi(sk)->role_switch)
lm2 |= HCI_LM_MASTER;
}
}
- read_unlock(&chan_list_lock);
+ read_unlock(&l2cap_sk_list.lock);
return exact ? lm1 : lm2;
}
-int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn;
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
+ if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
+ return -EINVAL;
+
if (!status) {
conn = l2cap_conn_add(hcon, status);
if (conn)
l2cap_conn_ready(conn);
} else
- l2cap_conn_del(hcon, bt_to_errno(status));
+ l2cap_conn_del(hcon, bt_err(status), 0);
return 0;
}
-int l2cap_disconn_ind(struct hci_conn *hcon)
+static int l2cap_disconn_ind(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p", hcon);
- if (!conn)
- return HCI_ERROR_REMOTE_USER_TERM;
+ if (hcon->type != ACL_LINK || !conn)
+ return 0x13;
+
return conn->disc_reason;
}
-int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason, u8 is_process)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- l2cap_conn_del(hcon, bt_to_errno(reason));
+ if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
+ return -EINVAL;
+
+ l2cap_conn_del(hcon, bt_err(reason), is_process);
+
return 0;
}
-static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
+static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
return;
if (encrypt == 0x00) {
- if (chan->sec_level == BT_SECURITY_MEDIUM) {
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
- } else if (chan->sec_level == BT_SECURITY_HIGH)
- l2cap_chan_close(chan, ECONNREFUSED);
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ * 5);
+ } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+ __l2cap_sock_close(sk, ECONNREFUSED);
} else {
- if (chan->sec_level == BT_SECURITY_MEDIUM)
- __clear_chan_timer(chan);
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
+ l2cap_sock_clear_timer(sk);
}
}
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
+ struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
- struct l2cap_chan *chan;
+ struct sock *sk;
+ int smp = 0;
if (!conn)
return 0;
+ l = &conn->chan_list;
+
BT_DBG("conn %p", conn);
- if (hcon->type == LE_LINK) {
- smp_distribute_keys(conn, 0);
- cancel_delayed_work(&conn->security_timer);
- }
+ read_lock(&l->lock);
- mutex_lock(&conn->chan_lock);
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ bh_lock_sock(sk);
- list_for_each_entry(chan, &conn->chan_l, list) {
- l2cap_chan_lock(chan);
+ BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);
- BT_DBG("chan->scid %d", chan->scid);
-
- if (chan->scid == L2CAP_CID_LE_DATA) {
+ if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
- chan->sec_level = hcon->sec_level;
- l2cap_chan_ready(chan);
+ l2cap_pi(sk)->sec_level = hcon->sec_level;
+ l2cap_chan_ready(sk);
}
- l2cap_chan_unlock(chan);
+ smp = 1;
+ bh_unlock_sock(sk);
continue;
}
- if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
- l2cap_chan_unlock(chan);
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
+ bh_unlock_sock(sk);
continue;
}
- if (!status && (chan->state == BT_CONNECTED ||
- chan->state == BT_CONFIG)) {
- struct sock *sk = chan->sk;
-
- bt_sk(sk)->suspended = false;
- sk->sk_state_change(sk);
-
- l2cap_check_encryption(chan, encrypt);
- l2cap_chan_unlock(chan);
+ if (!status && (sk->sk_state == BT_CONNECTED ||
+ sk->sk_state == BT_CONFIG)) {
+ l2cap_check_encryption(sk, encrypt);
+ bh_unlock_sock(sk);
continue;
}
- if (chan->state == BT_CONNECT) {
+ if (sk->sk_state == BT_CONNECT) {
if (!status) {
- l2cap_send_conn_req(chan);
+ l2cap_pi(sk)->conf_state |=
+ L2CAP_CONF_CONNECT_PEND;
+ if (l2cap_pi(sk)->amp_pref ==
+ BT_AMP_POLICY_PREFER_AMP) {
+ amp_create_physical(l2cap_pi(sk)->conn,
+ sk);
+ } else
+ l2cap_send_conn_req(sk);
} else {
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 10);
}
- } else if (chan->state == BT_CONNECT2) {
- struct sock *sk = chan->sk;
+ } else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
- __u16 res, stat;
-
- lock_sock(sk);
+ __u16 result;
if (!status) {
- if (bt_sk(sk)->defer_setup) {
- struct sock *parent = bt_sk(sk)->parent;
- res = L2CAP_CR_PEND;
- stat = L2CAP_CS_AUTHOR_PEND;
- if (parent)
- parent->sk_data_ready(parent, 0);
- } else {
- __l2cap_state_change(chan, BT_CONFIG);
- res = L2CAP_CR_SUCCESS;
- stat = L2CAP_CS_NO_INFO;
+ if (l2cap_pi(sk)->amp_id) {
+ amp_accept_physical(conn,
+ l2cap_pi(sk)->amp_id, sk);
+ bh_unlock_sock(sk);
+ continue;
}
+
+ sk->sk_state = BT_CONFIG;
+ result = L2CAP_CR_SUCCESS;
} else {
- __l2cap_state_change(chan, BT_DISCONN);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- res = L2CAP_CR_SEC_BLOCK;
- stat = L2CAP_CS_NO_INFO;
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_set_timer(sk, HZ / 10);
+ result = L2CAP_CR_SEC_BLOCK;
}
- release_sock(sk);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(res);
- rsp.status = cpu_to_le16(stat);
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
- sizeof(rsp), &rsp);
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
+ result == L2CAP_CR_SUCCESS) {
+ char buf[128];
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf),
+ buf);
+ l2cap_pi(sk)->num_conf_req++;
+ }
}
- l2cap_chan_unlock(chan);
+ bh_unlock_sock(sk);
}
- mutex_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
+
+ if (smp) {
+ del_timer(&hcon->smp_timer);
+ smp_link_encrypt_cmplt(conn, status, encrypt);
+ }
return 0;
}
-int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ if (!conn && hcon->hdev->dev_type != HCI_BREDR)
+ goto drop;
+
if (!conn)
conn = l2cap_conn_add(hcon, 0);
@@ -4662,10 +7599,8 @@
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
- if (!(flags & ACL_CONT)) {
+ if (flags & ACL_START) {
struct l2cap_hdr *hdr;
- struct l2cap_chan *chan;
- u16 cid;
int len;
if (conn->rx_len) {
@@ -4685,7 +7620,6 @@
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
- cid = __le16_to_cpu(hdr->cid);
if (len == skb->len) {
/* Complete frame received */
@@ -4693,6 +7627,14 @@
return 0;
}
+ if (flags & ACL_CONT) {
+ BT_ERR("Complete frame is incomplete "
+ "(len %d, expected len %d)",
+ skb->len, len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
@@ -4702,23 +7644,6 @@
goto drop;
}
- chan = l2cap_get_chan_by_scid(conn, cid);
-
- if (chan && chan->sk) {
- struct sock *sk = chan->sk;
- lock_sock(sk);
-
- if (chan->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, "
- "MTU %d)", len,
- chan->imtu);
- release_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
- release_sock(sk);
- }
-
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!conn->rx_skb)
@@ -4762,24 +7687,53 @@
return 0;
}
+static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to)
+{
+ struct hci_cp_write_automatic_flush_timeout flush_tm;
+ if (hcon && hcon->hdev) {
+ flush_tm.handle = hcon->handle;
+ if (flush_to == L2CAP_DEFAULT_FLUSH_TO)
+ flush_to = 0;
+ flush_tm.timeout = (flush_to < L2CAP_MAX_FLUSH_TO) ?
+ flush_to : L2CAP_MAX_FLUSH_TO;
+ hci_send_cmd(hcon->hdev,
+ HCI_OP_WRITE_AUTOMATIC_FLUSH_TIMEOUT,
+ 4, &(flush_tm));
+ }
+}
+
+static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l)
+{
+ int ret_flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->flush_to > 0 &&
+ l2cap_pi(s)->flush_to < ret_flush_to)
+ ret_flush_to = l2cap_pi(s)->flush_to;
+ }
+ return ret_flush_to;
+}
+
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
- struct l2cap_chan *c;
+ struct sock *sk;
+ struct hlist_node *node;
- read_lock(&chan_list_lock);
+ read_lock_bh(&l2cap_sk_list.lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst),
- c->state, __le16_to_cpu(c->psm),
- c->scid, c->dcid, c->imtu, c->omtu,
- c->sec_level, c->mode);
+ sk->sk_state, __le16_to_cpu(pi->psm),
+ pi->scid, pi->dcid,
+ pi->imtu, pi->omtu, pi->sec_level,
+ pi->mode);
}
- read_unlock(&chan_list_lock);
+ read_unlock_bh(&l2cap_sk_list.lock);
return 0;
}
@@ -4798,6 +7752,20 @@
static struct dentry *l2cap_debugfs;
+static struct hci_proto l2cap_hci_proto = {
+ .name = "L2CAP",
+ .id = HCI_PROTO_L2CAP,
+ .connect_ind = l2cap_connect_ind,
+ .connect_cfm = l2cap_connect_cfm,
+ .disconn_ind = l2cap_disconn_ind,
+ .disconn_cfm = l2cap_disconn_cfm,
+ .security_cfm = l2cap_security_cfm,
+ .recv_acldata = l2cap_recv_acldata,
+ .create_cfm = l2cap_create_cfm,
+ .modify_cfm = l2cap_modify_cfm,
+ .destroy_cfm = l2cap_destroy_cfm,
+};
+
int __init l2cap_init(void)
{
int err;
@@ -4806,6 +7774,19 @@
if (err < 0)
return err;
+ _l2cap_wq = create_singlethread_workqueue("l2cap");
+ if (!_l2cap_wq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = hci_register_proto(&l2cap_hci_proto);
+ if (err < 0) {
+ BT_ERR("L2CAP protocol registration failed");
+ bt_sock_unregister(BTPROTO_L2CAP);
+ goto error;
+ }
+
if (bt_debugfs) {
l2cap_debugfs = debugfs_create_file("l2cap", 0444,
bt_debugfs, NULL, &l2cap_debugfs_fops);
@@ -4813,14 +7794,36 @@
BT_ERR("Failed to create L2CAP debug file");
}
+ if (amp_init() < 0) {
+ BT_ERR("AMP Manager initialization failed");
+ goto error;
+ }
+
return 0;
+
+error:
+ destroy_workqueue(_l2cap_wq);
+ l2cap_cleanup_sockets();
+ return err;
}
void l2cap_exit(void)
{
+ amp_exit();
+
debugfs_remove(l2cap_debugfs);
+
+ flush_workqueue(_l2cap_wq);
+ destroy_workqueue(_l2cap_wq);
+
+ if (hci_unregister_proto(&l2cap_hci_proto) < 0)
+ BT_ERR("L2CAP protocol unregistration failed");
+
l2cap_cleanup_sockets();
}
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
+
+module_param(enable_reconfig, bool, 0644);
+MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 04e7c17..0ad1633 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1,9 +1,8 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2000-2001, 2011-2012 Code Aurora Forum. All rights reserved.
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
- Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -27,22 +26,93 @@
/* Bluetooth L2CAP sockets. */
-#include <linux/security.h>
-#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/amp.h>
-static const struct proto_ops l2cap_sock_ops;
-static void l2cap_sock_init(struct sock *sk, struct sock *parent);
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
+/* ---- L2CAP timers ---- */
+static void l2cap_sock_timeout(unsigned long arg)
+{
+ struct sock *sk = (struct sock *) arg;
+ int reason;
+
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+
+ bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ /* sk is owned by user. Try again later */
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
+ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
+ reason = ECONNREFUSED;
+ else if (sk->sk_state == BT_CONNECT &&
+ l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+
+ __l2cap_sock_close(sk, reason);
+
+ bh_unlock_sock(sk);
+
+ l2cap_sock_kill(sk);
+ sock_put(sk);
+}
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+void l2cap_sock_clear_timer(struct sock *sk)
+{
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+ sk_stop_timer(sk, &sk->sk_timer);
+}
+
+int l2cap_sock_le_params_valid(struct bt_le_params *le_params)
+{
+ if (!le_params || le_params->latency > BT_LE_LATENCY_MAX ||
+ le_params->scan_window > BT_LE_SCAN_WINDOW_MAX ||
+ le_params->scan_interval < BT_LE_SCAN_INTERVAL_MIN ||
+ le_params->scan_window > le_params->scan_interval ||
+ le_params->interval_min < BT_LE_CONN_INTERVAL_MIN ||
+ le_params->interval_max > BT_LE_CONN_INTERVAL_MAX ||
+ le_params->interval_min > le_params->interval_max ||
+ le_params->supervision_timeout < BT_LE_SUP_TO_MIN ||
+ le_params->supervision_timeout > BT_LE_SUP_TO_MAX) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ sk_for_each(sk, node, &l2cap_sk_list.head)
+ if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
+ goto found;
+ sk = NULL;
+found:
+ return sk;
+}
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
@@ -81,22 +151,26 @@
}
}
+ write_lock_bh(&l2cap_sk_list.lock);
+
+ if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+ err = -EADDRINUSE;
+ } else {
+ /* Save source address */
+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->sport = la.l2_psm;
+ sk->sk_state = BT_BOUND;
+
+ if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+ __le16_to_cpu(la.l2_psm) == 0x0003)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ }
+
if (la.l2_cid)
- err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
- else
- err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
+ l2cap_pi(sk)->scid = la.l2_cid;
- if (err < 0)
- goto done;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- chan->sec_level = BT_SECURITY_SDP;
-
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
-
- chan->state = BT_BOUND;
- sk->sk_state = BT_BOUND;
+ write_unlock_bh(&l2cap_sk_list.lock);
done:
release_sock(sk);
@@ -106,11 +180,11 @@
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %p type %d mode %d state %d", sk, sk->sk_type,
+ l2cap_pi(sk)->mode, sk->sk_state);
if (!addr || alen < sizeof(addr->sa_family) ||
addr->sa_family != AF_BLUETOOTH)
@@ -123,38 +197,15 @@
if (la.l2_cid && la.l2_psm)
return -EINVAL;
- err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr);
- if (err)
- return err;
-
lock_sock(sk);
- err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
-
- release_sock(sk);
-
- return err;
-}
-
-static int l2cap_sock_listen(struct socket *sock, int backlog)
-{
- struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- int err = 0;
-
- BT_DBG("sk %p backlog %d", sk, backlog);
-
- lock_sock(sk);
-
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
- err = -EBADFD;
+ if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+ && !(la.l2_psm || la.l2_cid || l2cap_pi(sk)->fixed_channel)) {
+ err = -EINVAL;
goto done;
}
- switch (chan->mode) {
+ switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
@@ -167,10 +218,108 @@
goto done;
}
+ switch (sk->sk_state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ goto wait;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
+ !l2cap_pi(sk)->fixed_channel &&
+ sk->sk_type != SOCK_RAW && !la.l2_cid) {
+ BT_DBG("Bad PSM 0x%x", (int)__le16_to_cpu(la.l2_psm));
+ err = -EINVAL;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->dcid = la.l2_cid;
+
+ err = l2cap_do_connect(sk);
+ if (err)
+ goto done;
+
+wait:
+ err = bt_sock_wait_state(sk, BT_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+done:
+ if (err)
+ BT_ERR("failed %d", err);
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sk %p backlog %d", sk, backlog);
+
+ lock_sock(sk);
+
+ if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+ || sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->scid) {
+ bdaddr_t *src = &bt_sk(sk)->src;
+ u16 psm;
+
+ err = -EINVAL;
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
+ for (psm = 0x1001; psm < 0x1100; psm += 2)
+ if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+ l2cap_pi(sk)->psm = cpu_to_le16(psm);
+ l2cap_pi(sk)->sport = cpu_to_le16(psm);
+ err = 0;
+ break;
+ }
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+
+ if (err < 0)
+ goto done;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
-
- chan->state = BT_LISTEN;
sk->sk_state = BT_LISTEN;
done:
@@ -187,26 +336,30 @@
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (1) {
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
- nsk = bt_accept_dequeue(sk, newsock);
- if (nsk)
- break;
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- if (!timeo) {
- err = -EAGAIN;
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
@@ -214,12 +367,8 @@
err = sock_intr_errno(timeo);
break;
}
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -238,7 +387,6 @@
{
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -246,13 +394,13 @@
*len = sizeof(struct sockaddr_l2);
if (peer) {
- la->l2_psm = chan->psm;
+ la->l2_psm = l2cap_pi(sk)->psm;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
- la->l2_cid = cpu_to_le16(chan->dcid);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
} else {
- la->l2_psm = chan->sport;
+ la->l2_psm = l2cap_pi(sk)->sport;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
- la->l2_cid = cpu_to_le16(chan->scid);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
}
return 0;
@@ -261,7 +409,6 @@
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
int len, err = 0;
@@ -277,13 +424,13 @@
switch (optname) {
case L2CAP_OPTIONS:
memset(&opts, 0, sizeof(opts));
- opts.imtu = chan->imtu;
- opts.omtu = chan->omtu;
- opts.flush_to = chan->flush_to;
- opts.mode = chan->mode;
- opts.fcs = chan->fcs;
- opts.max_tx = chan->max_tx;
- opts.txwin_size = chan->tx_win;
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -292,7 +439,7 @@
break;
case L2CAP_LM:
- switch (chan->sec_level) {
+ switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = L2CAP_LM_AUTH;
break;
@@ -308,12 +455,15 @@
break;
}
- if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
+ if (l2cap_pi(sk)->role_switch)
opt |= L2CAP_LM_MASTER;
- if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
+ if (l2cap_pi(sk)->force_reliable)
opt |= L2CAP_LM_RELIABLE;
+ if (l2cap_pi(sk)->flushable)
+ opt |= L2CAP_LM_FLUSHABLE;
+
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
@@ -326,9 +476,8 @@
break;
}
- memset(&cinfo, 0, sizeof(cinfo));
- cinfo.hci_handle = chan->conn->hcon->handle;
- memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
+ cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
@@ -348,7 +497,6 @@
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
int len, err = 0;
@@ -368,17 +516,19 @@
switch (optname) {
case BT_SECURITY:
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
memset(&sec, 0, sizeof(sec));
- sec.level = chan->sec_level;
+ sec.level = l2cap_pi(sk)->sec_level;
- if (sk->sk_state == BT_CONNECTED)
- sec.key_size = chan->conn->hcon->enc_key_size;
+ if (sk->sk_state == BT_CONNECTED) {
+ sec.key_size = l2cap_pi(sk)->conn->hcon->enc_key_size;
+ sec.level = l2cap_pi(sk)->conn->hcon->sec_level;
+ }
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
@@ -397,13 +547,6 @@
break;
- case BT_FLUSHABLE:
- if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
- (u32 __user *) optval))
- err = -EFAULT;
-
- break;
-
case BT_POWER:
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
&& sk->sk_type != SOCK_RAW) {
@@ -411,7 +554,7 @@
break;
}
- pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ pwr.force_active = l2cap_pi(sk)->force_active;
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
@@ -419,13 +562,19 @@
break;
- case BT_CHANNEL_POLICY:
- if (!enable_hs) {
- err = -ENOPROTOOPT;
+ case BT_AMP_POLICY:
+ if (put_user(l2cap_pi(sk)->amp_pref, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case BT_LE_PARAMS:
+ if (l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) {
+ err = -EINVAL;
break;
}
- if (put_user(chan->chan_policy, (u32 __user *) optval))
+ if (copy_to_user(optval, (char *) &bt_sk(sk)->le_params,
+ sizeof(bt_sk(sk)->le_params)))
err = -EFAULT;
break;
@@ -441,29 +590,30 @@
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
- int len, err = 0;
+ int len, le_sock, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
+ le_sock = l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA;
+
switch (optname) {
case L2CAP_OPTIONS:
- if (sk->sk_state == BT_CONNECTED) {
+ if (sk->sk_state == BT_CONNECTED && !le_sock) {
err = -EINVAL;
break;
}
- opts.imtu = chan->imtu;
- opts.omtu = chan->omtu;
- opts.flush_to = chan->flush_to;
- opts.mode = chan->mode;
- opts.fcs = chan->fcs;
- opts.max_tx = chan->max_tx;
- opts.txwin_size = chan->tx_win;
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -471,18 +621,39 @@
break;
}
- if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
+ if ((opts.imtu || opts.omtu) && le_sock &&
+ (sk->sk_state == BT_CONNECTED)) {
+ if (opts.imtu >= L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->imtu = opts.imtu;
+ if (opts.omtu >= L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->omtu = opts.omtu;
+ if (opts.imtu < L2CAP_LE_DEFAULT_MTU ||
+ opts.omtu < L2CAP_LE_DEFAULT_MTU)
+ err = -EINVAL;
+ break;
+ }
+
+ if (opts.txwin_size < 1 ||
+ opts.txwin_size > L2CAP_TX_WIN_MAX_EXTENDED) {
err = -EINVAL;
break;
}
- chan->mode = opts.mode;
- switch (chan->mode) {
+ l2cap_pi(sk)->mode = opts.mode;
+ switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
- clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+ break;
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm) {
+ /* No fallback to ERTM or Basic mode */
+ l2cap_pi(sk)->conf_state |=
+ L2CAP_CONF_STATE2_DEVICE;
+ break;
+ }
+ err = -EINVAL;
break;
case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
@@ -491,11 +662,12 @@
break;
}
- chan->imtu = opts.imtu;
- chan->omtu = opts.omtu;
- chan->fcs = opts.fcs;
- chan->max_tx = opts.max_tx;
- chan->tx_win = opts.txwin_size;
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->fcs = opts.fcs;
+ l2cap_pi(sk)->max_tx = opts.max_tx;
+ l2cap_pi(sk)->tx_win = opts.txwin_size;
+ l2cap_pi(sk)->flush_to = opts.flush_to;
break;
case L2CAP_LM:
@@ -505,21 +677,15 @@
}
if (opt & L2CAP_LM_AUTH)
- chan->sec_level = BT_SECURITY_LOW;
+ l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & L2CAP_LM_ENCRYPT)
- chan->sec_level = BT_SECURITY_MEDIUM;
+ l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & L2CAP_LM_SECURE)
- chan->sec_level = BT_SECURITY_HIGH;
+ l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
- if (opt & L2CAP_LM_MASTER)
- set_bit(FLAG_ROLE_SWITCH, &chan->flags);
- else
- clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
-
- if (opt & L2CAP_LM_RELIABLE)
- set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
- else
- clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
+ l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE);
break;
default:
@@ -534,9 +700,9 @@
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
+ struct bt_le_params le_params;
struct l2cap_conn *conn;
int len, err = 0;
u32 opt;
@@ -553,8 +719,8 @@
switch (optname) {
case BT_SECURITY:
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
@@ -573,15 +739,10 @@
break;
}
- chan->sec_level = sec.level;
+ l2cap_pi(sk)->sec_level = sec.level;
- if (!chan->conn)
- break;
-
- conn = chan->conn;
-
- /*change security for LE channels */
- if (chan->scid == L2CAP_CID_LE_DATA) {
+ conn = l2cap_pi(sk)->conn;
+ if (conn && l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
if (!conn->hcon->out) {
err = -EINVAL;
break;
@@ -589,19 +750,9 @@
if (smp_conn_security(conn, sec.level))
break;
- sk->sk_state = BT_CONFIG;
- chan->state = BT_CONFIG;
- /* or for ACL link */
- } else if ((sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup) ||
- sk->sk_state == BT_CONNECTED) {
- if (!l2cap_chan_check_security(chan))
- bt_sk(sk)->suspended = true;
- else
- sk->sk_state_change(sk);
- } else {
- err = -EINVAL;
+ err = 0;
+ sk->sk_state = BT_CONFIG;
}
break;
@@ -619,77 +770,87 @@
bt_sk(sk)->defer_setup = opt;
break;
- case BT_FLUSHABLE:
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- if (opt > BT_FLUSHABLE_ON) {
- err = -EINVAL;
- break;
- }
-
- if (opt == BT_FLUSHABLE_OFF) {
- struct l2cap_conn *conn = chan->conn;
- /* proceed further only when we have l2cap_conn and
- No Flush support in the LM */
- if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
- err = -EINVAL;
- break;
- }
- }
-
- if (opt)
- set_bit(FLAG_FLUSHABLE, &chan->flags);
- else
- clear_bit(FLAG_FLUSHABLE, &chan->flags);
- break;
-
case BT_POWER:
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
- pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+ pwr.force_active = 1;
len = min_t(unsigned int, sizeof(pwr), optlen);
if (copy_from_user((char *) &pwr, optval, len)) {
err = -EFAULT;
break;
}
-
- if (pwr.force_active)
- set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
- else
- clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ l2cap_pi(sk)->force_active = pwr.force_active;
break;
- case BT_CHANNEL_POLICY:
- if (!enable_hs) {
- err = -ENOPROTOOPT;
- break;
- }
-
+ case BT_AMP_POLICY:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
- if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+ if ((opt > BT_AMP_POLICY_PREFER_AMP) ||
+ ((l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) &&
+ (l2cap_pi(sk)->mode != L2CAP_MODE_STREAMING))) {
err = -EINVAL;
break;
}
- if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
- err = -EOPNOTSUPP;
+ l2cap_pi(sk)->amp_pref = (u8) opt;
+ BT_DBG("BT_AMP_POLICY now %d", opt);
+
+ if ((sk->sk_state == BT_CONNECTED) &&
+ (l2cap_pi(sk)->amp_move_role == L2CAP_AMP_MOVE_NONE))
+ l2cap_amp_move_init(sk);
+
+ break;
+
+ case BT_FLUSHABLE:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+ l2cap_pi(sk)->flushable = opt;
+
+ break;
+
+ case BT_LE_PARAMS:
+ if (l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) {
+ err = -EINVAL;
break;
}
- chan->chan_policy = (u8) opt;
+ if (copy_from_user((char *) &le_params, optval,
+ sizeof(struct bt_le_params))) {
+ err = -EFAULT;
+ break;
+ }
+
+ conn = l2cap_pi(sk)->conn;
+ if (!conn || !conn->hcon ||
+ l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) {
+ memcpy(&bt_sk(sk)->le_params, &le_params,
+ sizeof(le_params));
+ break;
+ }
+
+ if (!conn->hcon->out ||
+ !l2cap_sock_le_params_valid(&le_params)) {
+ err = -EINVAL;
+ break;
+ }
+
+ memcpy(&bt_sk(sk)->le_params, &le_params, sizeof(le_params));
+
+ hci_le_conn_update(conn->hcon,
+ le_params.interval_min,
+ le_params.interval_max,
+ le_params.latency,
+ le_params.supervision_timeout);
break;
default:
@@ -704,8 +865,11 @@
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ struct sk_buff_head seg_queue;
int err;
+ u8 amp_id;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -719,12 +883,102 @@
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
- return -ENOTCONN;
+ err = -ENOTCONN;
+ goto done;
}
- err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ /* Connectionless channel */
+ if (sk->sk_type == SOCK_DGRAM) {
+ skb = l2cap_create_connless_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ } else {
+ l2cap_do_send(sk, skb);
+ err = len;
+ }
+ goto done;
+ }
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EMSGSIZE;
+ goto done;
+ }
+
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+
+ l2cap_do_send(sk, skb);
+ err = len;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EMSGSIZE;
+ goto done;
+ }
+
+ __skb_queue_head_init(&seg_queue);
+
+ /* Do segmentation before calling in to the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ amp_id = pi->amp_id;
+ err = l2cap_segment_sdu(sk, &seg_queue, msg, len, 0);
+
+ /* The socket lock is released while segmenting, so check
+ * that the socket is still connected
+ */
+ if (sk->sk_state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
+ }
+
+ if (err) {
+ BT_DBG("Error %d, sk_sndbuf %d, sk_wmem_alloc %d",
+ err, sk->sk_sndbuf,
+ atomic_read(&sk->sk_wmem_alloc));
+ break;
+ }
+
+ if (pi->amp_id != amp_id) {
+ /* Channel moved while unlocked. Resegment. */
+ err = l2cap_resegment_queue(sk, &seg_queue);
+
+ if (err)
+ break;
+ }
+
+ if (pi->mode != L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_tx(sk, 0, &seg_queue,
+ L2CAP_ERTM_EVENT_DATA_REQUEST);
+ else
+ err = l2cap_strm_tx(sk, &seg_queue);
+ if (!err)
+ err = len;
+
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
+ break;
+
+ default:
+ BT_DBG("bad state %1.1x", pi->mode);
+ err = -EBADFD;
+ }
+
+done:
release_sock(sk);
return err;
}
@@ -732,16 +986,43 @@
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
int err;
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
- sk->sk_state = BT_CONFIG;
- pi->chan->state = BT_CONFIG;
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ u8 buf[128];
- __l2cap_connect_rsp_defer(pi->chan);
+ if (l2cap_pi(sk)->amp_id) {
+ /* Physical link must be brought up before connection
+ * completes.
+ */
+ amp_accept_physical(conn, l2cap_pi(sk)->amp_id, sk);
+ release_sock(sk);
+ return 0;
+ }
+
+ sk->sk_state = BT_CONFIG;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
+ release_sock(sk);
+ return 0;
+ }
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
+
release_sock(sk);
return 0;
}
@@ -753,57 +1034,112 @@
else
err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
- if (pi->chan->mode != L2CAP_MODE_ERTM)
- return err;
+ if (err >= 0)
+ l2cap_ertm_recv_done(sk);
- /* Attempt to put pending rx data in the socket buffer */
-
- lock_sock(sk);
-
- if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
- goto done;
-
- if (pi->rx_busy_skb) {
- if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
- pi->rx_busy_skb = NULL;
- else
- goto done;
- }
-
- /* Restore data flow when half of the receive buffer is
- * available. This avoids resending large numbers of
- * frames.
- */
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
- l2cap_chan_busy(pi->chan, 0);
-
-done:
- release_sock(sk);
return err;
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
-static void l2cap_sock_kill(struct sock *sk)
+void l2cap_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
-
- l2cap_chan_destroy(l2cap_pi(sk)->chan);
+ bt_sock_unlink(&l2cap_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
+/* Must be called on unlocked socket. */
+static void l2cap_sock_close(struct sock *sk)
+{
+ l2cap_sock_clear_timer(sk);
+ lock_sock(sk);
+ __l2cap_sock_close(sk, ECONNRESET);
+ release_sock(sk);
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL)))
+ l2cap_sock_close(sk);
+
+ parent->sk_state = BT_CLOSED;
+ sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+void __l2cap_sock_close(struct sock *sk, int reason)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+
+ switch (sk->sk_state) {
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ break;
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+ l2cap_send_disconn_req(conn, sk, reason);
+ } else
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT2:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ struct l2cap_conn_rsp rsp;
+ __u16 result;
+
+ if (bt_sk(sk)->defer_setup)
+ result = L2CAP_CR_SEC_BLOCK;
+ else
+ result = L2CAP_CR_BAD_PSM;
+ sk->sk_state = BT_DISCONN;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ }
+
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(sk, reason);
+ break;
+
+ default:
+ sock_set_flag(sk, SOCK_ZAPPED);
+ break;
+ }
+}
+
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan;
- struct l2cap_conn *conn;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -811,24 +1147,17 @@
if (!sk)
return 0;
- chan = l2cap_pi(sk)->chan;
- conn = chan->conn;
-
- if (conn)
- mutex_lock(&conn->chan_lock);
-
- l2cap_chan_lock(chan);
lock_sock(sk);
-
if (!sk->sk_shutdown) {
- if (chan->mode == L2CAP_MODE_ERTM)
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
err = __l2cap_wait_ack(sk);
+ l2cap_ertm_shutdown(sk);
+ }
sk->sk_shutdown = SHUTDOWN_MASK;
-
- release_sock(sk);
- l2cap_chan_close(chan, 0);
- lock_sock(sk);
+ l2cap_sock_clear_timer(sk);
+ __l2cap_sock_close(sk, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -839,17 +1168,13 @@
err = -sk->sk_err;
release_sock(sk);
- l2cap_chan_unlock(chan);
-
- if (conn)
- mutex_unlock(&conn->chan_lock);
-
return err;
}
static int l2cap_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
+ struct sock *srv_sk = NULL;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -857,6 +1182,16 @@
if (!sk)
return 0;
+ /* If this is an ATT Client socket, find the matching Server */
+ if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA && !l2cap_pi(sk)->incoming)
+ srv_sk = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
+ &bt_sk(sk)->src, &bt_sk(sk)->dst, 1);
+
+ /* If server socket found, request tear down */
+ BT_DBG("client:%p server:%p", sk, srv_sk);
+ if (srv_sk)
+ l2cap_sock_set_timer(srv_sk, 1);
+
err = l2cap_sock_shutdown(sock, 2);
sock_orphan(sk);
@@ -864,166 +1199,86 @@
return err;
}
-static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
-{
- struct sock *sk, *parent = data;
-
- sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
- GFP_ATOMIC);
- if (!sk)
- return NULL;
-
- bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
-
- l2cap_sock_init(sk, parent);
-
- return l2cap_pi(sk)->chan;
-}
-
-static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
-{
- int err;
- struct sock *sk = data;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- lock_sock(sk);
-
- if (pi->rx_busy_skb) {
- err = -ENOMEM;
- goto done;
- }
-
- err = sock_queue_rcv_skb(sk, skb);
-
- /* For ERTM, handle one skb that doesn't fit into the recv
- * buffer. This is important to do because the data frames
- * have already been acked, so the skb cannot be discarded.
- *
- * Notify the l2cap core that the buffer is full, so the
- * LOCAL_BUSY state is entered and no more frames are
- * acked and reassembled until there is buffer space
- * available.
- */
- if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
- pi->rx_busy_skb = skb;
- l2cap_chan_busy(pi->chan, 1);
- err = 0;
- }
-
-done:
- release_sock(sk);
-
- return err;
-}
-
-static void l2cap_sock_close_cb(void *data)
-{
- struct sock *sk = data;
-
- l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_state_change_cb(void *data, int state)
-{
- struct sock *sk = data;
-
- sk->sk_state = state;
-}
-
-static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long len, int nb,
- int *err)
-{
- struct sock *sk = chan->sk;
-
- return bt_skb_send_alloc(sk, len, nb, err);
-}
-
-static struct l2cap_ops l2cap_chan_ops = {
- .name = "L2CAP Socket Interface",
- .new_connection = l2cap_sock_new_connection_cb,
- .recv = l2cap_sock_recv_cb,
- .close = l2cap_sock_close_cb,
- .state_change = l2cap_sock_state_change_cb,
- .alloc_skb = l2cap_sock_alloc_skb_cb,
-};
-
static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
- if (l2cap_pi(sk)->rx_busy_skb) {
- kfree_skb(l2cap_pi(sk)->rx_busy_skb);
- l2cap_pi(sk)->rx_busy_skb = NULL;
- }
-
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
+
+ l2cap_ertm_destruct(sk);
}
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+static void set_default_config(struct l2cap_conf_prm *conf_prm)
+{
+ conf_prm->fcs = L2CAP_FCS_CRC16;
+ conf_prm->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+}
+
+void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_chan *chan = pi->chan;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %p parent %p", sk, parent);
if (parent) {
- struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
-
sk->sk_type = parent->sk_type;
+ sk->sk_rcvbuf = parent->sk_rcvbuf;
+ sk->sk_sndbuf = parent->sk_sndbuf;
bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
- chan->chan_type = pchan->chan_type;
- chan->imtu = pchan->imtu;
- chan->omtu = pchan->omtu;
- chan->conf_state = pchan->conf_state;
- chan->mode = pchan->mode;
- chan->fcs = pchan->fcs;
- chan->max_tx = pchan->max_tx;
- chan->tx_win = pchan->tx_win;
- chan->tx_win_max = pchan->tx_win_max;
- chan->sec_level = pchan->sec_level;
- chan->flags = pchan->flags;
-
- security_sk_clone(parent, sk);
+ pi->imtu = l2cap_pi(parent)->imtu;
+ pi->omtu = l2cap_pi(parent)->omtu;
+ pi->conf_state = l2cap_pi(parent)->conf_state;
+ pi->mode = l2cap_pi(parent)->mode;
+ pi->fcs = l2cap_pi(parent)->fcs;
+ pi->max_tx = l2cap_pi(parent)->max_tx;
+ pi->tx_win = l2cap_pi(parent)->tx_win;
+ pi->sec_level = l2cap_pi(parent)->sec_level;
+ pi->role_switch = l2cap_pi(parent)->role_switch;
+ pi->force_reliable = l2cap_pi(parent)->force_reliable;
+ pi->flushable = l2cap_pi(parent)->flushable;
+ pi->force_active = l2cap_pi(parent)->force_active;
+ pi->amp_pref = l2cap_pi(parent)->amp_pref;
} else {
-
- switch (sk->sk_type) {
- case SOCK_RAW:
- chan->chan_type = L2CAP_CHAN_RAW;
- break;
- case SOCK_DGRAM:
- chan->chan_type = L2CAP_CHAN_CONN_LESS;
- break;
- case SOCK_SEQPACKET:
- case SOCK_STREAM:
- chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
- break;
- }
-
- chan->imtu = L2CAP_DEFAULT_MTU;
- chan->omtu = 0;
+ pi->imtu = L2CAP_DEFAULT_MTU;
+ pi->omtu = 0;
if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
- chan->mode = L2CAP_MODE_ERTM;
- set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ pi->mode = L2CAP_MODE_ERTM;
+ pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
} else {
- chan->mode = L2CAP_MODE_BASIC;
+ pi->mode = L2CAP_MODE_BASIC;
}
- chan->max_tx = L2CAP_DEFAULT_MAX_TX;
- chan->fcs = L2CAP_FCS_CRC16;
- chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
- chan->sec_level = BT_SECURITY_LOW;
- chan->flags = 0;
- set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ pi->reconf_state = L2CAP_RECONF_NONE;
+ pi->max_tx = L2CAP_DEFAULT_MAX_TX;
+ pi->fcs = L2CAP_FCS_CRC16;
+ pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ pi->sec_level = BT_SECURITY_LOW;
+ pi->role_switch = 0;
+ pi->force_reliable = 0;
+ pi->flushable = 0;
+ pi->force_active = 1;
+ pi->amp_pref = BT_AMP_POLICY_REQUIRE_BR_EDR;
}
/* Default config options */
- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ sk->sk_backlog_rcv = l2cap_data_channel;
+ pi->ampcon = NULL;
+ pi->ampchan = NULL;
+ pi->conf_len = 0;
+ pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ pi->scid = 0;
+ pi->dcid = 0;
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
+ pi->extended_control = 0;
- chan->data = sk;
- chan->ops = &l2cap_chan_ops;
+ pi->local_conf.fcs = pi->fcs;
+ pi->local_conf.flush_to = pi->flush_to;
+
+ set_default_config(&pi->remote_conf);
+
+ skb_queue_head_init(TX_QUEUE(sk));
+ skb_queue_head_init(SREJ_QUEUE(sk));
}
static struct proto l2cap_proto = {
@@ -1032,10 +1287,9 @@
.obj_size = sizeof(struct l2cap_pinfo)
};
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
- struct l2cap_chan *chan;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
if (!sk)
@@ -1045,21 +1299,16 @@
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+ sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- chan = l2cap_chan_create(sk);
- if (!chan) {
- l2cap_sock_kill(sk);
- return NULL;
- }
+ setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
- l2cap_pi(sk)->chan = chan;
-
+ bt_sock_link(&l2cap_sk_list, sk);
return sk;
}
@@ -1089,7 +1338,7 @@
return 0;
}
-static const struct proto_ops l2cap_sock_ops = {
+const struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 5066288..b826d1b 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -24,8 +24,6 @@
/* Bluetooth kernel library. */
-#define pr_fmt(fmt) "Bluetooth: " fmt
-
#include <linux/module.h>
#include <linux/kernel.h>
@@ -61,7 +59,7 @@
EXPORT_SYMBOL(batostr);
/* Bluetooth error codes to Unix errno mapping */
-int bt_to_errno(__u16 code)
+int bt_err(__u16 code)
{
switch (code) {
case 0:
@@ -151,42 +149,4 @@
return ENOSYS;
}
}
-EXPORT_SYMBOL(bt_to_errno);
-
-int bt_info(const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = pr_info("%pV", &vaf);
-
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(bt_info);
-
-int bt_err(const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = pr_err("%pV", &vaf);
-
- va_end(args);
-
- return r;
-}
EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index c05c3a6..ac85423 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1,8 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
-
Copyright (C) 2010 Nokia Corporation
- Copyright (C) 2011-2012 Intel Corporation
+ Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -24,191 +23,45 @@
/* Bluetooth HCI Management interface */
-#include <linux/kernel.h>
#include <linux/uaccess.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
-bool enable_hs;
-bool enable_le;
+#define MGMT_VERSION 0
+#define MGMT_REVISION 1
-#define MGMT_VERSION 1
-#define MGMT_REVISION 0
-
-static const u16 mgmt_commands[] = {
- MGMT_OP_READ_INDEX_LIST,
- MGMT_OP_READ_INFO,
- MGMT_OP_SET_POWERED,
- MGMT_OP_SET_DISCOVERABLE,
- MGMT_OP_SET_CONNECTABLE,
- MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_OP_SET_PAIRABLE,
- MGMT_OP_SET_LINK_SECURITY,
- MGMT_OP_SET_SSP,
- MGMT_OP_SET_HS,
- MGMT_OP_SET_LE,
- MGMT_OP_SET_DEV_CLASS,
- MGMT_OP_SET_LOCAL_NAME,
- MGMT_OP_ADD_UUID,
- MGMT_OP_REMOVE_UUID,
- MGMT_OP_LOAD_LINK_KEYS,
- MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_OP_DISCONNECT,
- MGMT_OP_GET_CONNECTIONS,
- MGMT_OP_PIN_CODE_REPLY,
- MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_OP_SET_IO_CAPABILITY,
- MGMT_OP_PAIR_DEVICE,
- MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_OP_UNPAIR_DEVICE,
- MGMT_OP_USER_CONFIRM_REPLY,
- MGMT_OP_USER_CONFIRM_NEG_REPLY,
- MGMT_OP_USER_PASSKEY_REPLY,
- MGMT_OP_USER_PASSKEY_NEG_REPLY,
- MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- MGMT_OP_START_DISCOVERY,
- MGMT_OP_STOP_DISCOVERY,
- MGMT_OP_CONFIRM_NAME,
- MGMT_OP_BLOCK_DEVICE,
- MGMT_OP_UNBLOCK_DEVICE,
-};
-
-static const u16 mgmt_events[] = {
- MGMT_EV_CONTROLLER_ERROR,
- MGMT_EV_INDEX_ADDED,
- MGMT_EV_INDEX_REMOVED,
- MGMT_EV_NEW_SETTINGS,
- MGMT_EV_CLASS_OF_DEV_CHANGED,
- MGMT_EV_LOCAL_NAME_CHANGED,
- MGMT_EV_NEW_LINK_KEY,
- MGMT_EV_NEW_LONG_TERM_KEY,
- MGMT_EV_DEVICE_CONNECTED,
- MGMT_EV_DEVICE_DISCONNECTED,
- MGMT_EV_CONNECT_FAILED,
- MGMT_EV_PIN_CODE_REQUEST,
- MGMT_EV_USER_CONFIRM_REQUEST,
- MGMT_EV_USER_PASSKEY_REQUEST,
- MGMT_EV_AUTH_FAILED,
- MGMT_EV_DEVICE_FOUND,
- MGMT_EV_DISCOVERING,
- MGMT_EV_DEVICE_BLOCKED,
- MGMT_EV_DEVICE_UNBLOCKED,
- MGMT_EV_DEVICE_UNPAIRED,
-};
-
-/*
- * These LE scan and inquiry parameters were chosen according to LE General
- * Discovery Procedure specification.
- */
-#define LE_SCAN_TYPE 0x01
-#define LE_SCAN_WIN 0x12
-#define LE_SCAN_INT 0x12
-#define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
-#define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
-
-#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
-#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
-
-#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
-
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
- !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+#define SCAN_IDLE 0x00
+#define SCAN_LE 0x01
+#define SCAN_BR 0x02
struct pending_cmd {
struct list_head list;
- u16 opcode;
+ __u16 opcode;
int index;
void *param;
struct sock *sk;
void *user_data;
};
-/* HCI to MGMT error code conversion table */
-static u8 mgmt_status_table[] = {
- MGMT_STATUS_SUCCESS,
- MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
- MGMT_STATUS_NOT_CONNECTED, /* No Connection */
- MGMT_STATUS_FAILED, /* Hardware Failure */
- MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
- MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
- MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
- MGMT_STATUS_NO_RESOURCES, /* Memory Full */
- MGMT_STATUS_TIMEOUT, /* Connection Timeout */
- MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
- MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
- MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
- MGMT_STATUS_BUSY, /* Command Disallowed */
- MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
- MGMT_STATUS_REJECTED, /* Rejected Security */
- MGMT_STATUS_REJECTED, /* Rejected Personal */
- MGMT_STATUS_TIMEOUT, /* Host Timeout */
- MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
- MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
- MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
- MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
- MGMT_STATUS_DISCONNECTED, /* OE Power Off */
- MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
- MGMT_STATUS_BUSY, /* Repeated Attempts */
- MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
- MGMT_STATUS_FAILED, /* Unknown LMP PDU */
- MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
- MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
- MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
- MGMT_STATUS_REJECTED, /* Air Mode Rejected */
- MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
- MGMT_STATUS_FAILED, /* Unspecified Error */
- MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
- MGMT_STATUS_FAILED, /* Role Change Not Allowed */
- MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
- MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
- MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
- MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
- MGMT_STATUS_FAILED, /* Unit Link Key Used */
- MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
- MGMT_STATUS_TIMEOUT, /* Instant Passed */
- MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
- MGMT_STATUS_FAILED, /* Transaction Collision */
- MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
- MGMT_STATUS_REJECTED, /* QoS Rejected */
- MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
- MGMT_STATUS_REJECTED, /* Insufficient Security */
- MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
- MGMT_STATUS_BUSY, /* Role Switch Pending */
- MGMT_STATUS_FAILED, /* Slot Violation */
- MGMT_STATUS_FAILED, /* Role Switch Failed */
- MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
- MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
- MGMT_STATUS_BUSY, /* Host Busy Pairing */
- MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
- MGMT_STATUS_BUSY, /* Controller Busy */
- MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
- MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
- MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
- MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
- MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
+struct mgmt_pending_free_work {
+ struct work_struct work;
+ struct sock *sk;
};
-static u8 mgmt_status(u8 hci_status)
-{
- if (hci_status < ARRAY_SIZE(mgmt_status_table))
- return mgmt_status_table[hci_status];
-
- return MGMT_STATUS_FAILED;
-}
+LIST_HEAD(cmd_list);
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
- int err;
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
@@ -226,20 +79,18 @@
ev->status = status;
put_unaligned_le16(cmd, &ev->opcode);
- err = sock_queue_rcv_skb(sk, skb);
- if (err < 0)
+ if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
- return err;
+ return 0;
}
-static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
- void *rp, size_t rp_len)
+static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
+ size_t rp_len)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
- int err;
BT_DBG("sock %p", sk);
@@ -255,20 +106,17 @@
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
put_unaligned_le16(cmd, &ev->opcode);
- ev->status = status;
if (rp)
memcpy(ev->data, rp, rp_len);
- err = sock_queue_rcv_skb(sk, skb);
- if (err < 0)
+ if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
- return err;
+ return 0;
}
-static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 data_len)
+static int read_version(struct sock *sk)
{
struct mgmt_rp_read_version rp;
@@ -277,50 +125,14 @@
rp.version = MGMT_VERSION;
put_unaligned_le16(MGMT_REVISION, &rp.revision);
- return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
- sizeof(rp));
+ return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
+ sizeof(rp));
}
-static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 data_len)
-{
- struct mgmt_rp_read_commands *rp;
- u16 num_commands = ARRAY_SIZE(mgmt_commands);
- u16 num_events = ARRAY_SIZE(mgmt_events);
- u16 *opcode;
- size_t rp_size;
- int i, err;
-
- BT_DBG("sock %p", sk);
-
- rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
-
- rp = kmalloc(rp_size, GFP_KERNEL);
- if (!rp)
- return -ENOMEM;
-
- put_unaligned_le16(num_commands, &rp->num_commands);
- put_unaligned_le16(num_events, &rp->num_events);
-
- for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
- put_unaligned_le16(mgmt_commands[i], opcode);
-
- for (i = 0; i < num_events; i++, opcode++)
- put_unaligned_le16(mgmt_events[i], opcode);
-
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
- rp_size);
- kfree(rp);
-
- return err;
-}
-
-static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 data_len)
+static int read_index_list(struct sock *sk)
{
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
- struct hci_dev *d;
size_t rp_len;
u16 count;
int i, err;
@@ -331,6 +143,9 @@
count = 0;
list_for_each(p, &hci_dev_list) {
+ struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ if (d->dev_type != HCI_BREDR)
+ continue;
count++;
}
@@ -341,90 +156,579 @@
return -ENOMEM;
}
- put_unaligned_le16(count, &rp->num_controllers);
+ put_unaligned_le16(0, &rp->num_controllers);
i = 0;
- list_for_each_entry(d, &hci_dev_list, list) {
- if (test_bit(HCI_SETUP, &d->dev_flags))
+ list_for_each(p, &hci_dev_list) {
+ struct hci_dev *d = list_entry(p, struct hci_dev, list);
+
+ hci_del_off_timer(d);
+
+ if (d->dev_type != HCI_BREDR)
+ continue;
+
+ set_bit(HCI_MGMT, &d->flags);
+
+ if (test_bit(HCI_SETUP, &d->flags))
continue;
put_unaligned_le16(d->id, &rp->index[i++]);
+ put_unaligned_le16((u16)i, &rp->num_controllers);
BT_DBG("Added hci%u", d->id);
}
read_unlock(&hci_dev_list_lock);
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
- rp_len);
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
+ rp_len);
kfree(rp);
return err;
}
-static u32 get_supported_settings(struct hci_dev *hdev)
+static int read_controller_info(struct sock *sk, u16 index)
{
- u32 settings = 0;
+ struct mgmt_rp_read_info rp;
+ struct hci_dev *hdev;
- settings |= MGMT_SETTING_POWERED;
- settings |= MGMT_SETTING_CONNECTABLE;
- settings |= MGMT_SETTING_FAST_CONNECTABLE;
- settings |= MGMT_SETTING_DISCOVERABLE;
- settings |= MGMT_SETTING_PAIRABLE;
+ BT_DBG("sock %p hci%u", sk, index);
- if (hdev->features[6] & LMP_SIMPLE_PAIR)
- settings |= MGMT_SETTING_SSP;
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
- if (!(hdev->features[4] & LMP_NO_BREDR)) {
- settings |= MGMT_SETTING_BREDR;
- settings |= MGMT_SETTING_LINK_SECURITY;
- }
+ hci_del_off_timer(hdev);
- if (enable_hs)
- settings |= MGMT_SETTING_HS;
+ hci_dev_lock_bh(hdev);
- if (enable_le) {
- if (hdev->features[4] & LMP_LE)
- settings |= MGMT_SETTING_LE;
- }
+ set_bit(HCI_MGMT, &hdev->flags);
- return settings;
+ memset(&rp, 0, sizeof(rp));
+
+ rp.type = hdev->dev_type;
+
+ rp.powered = test_bit(HCI_UP, &hdev->flags);
+ rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
+ rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
+ rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ rp.sec_mode = 3;
+ else if (hdev->ssp_mode > 0)
+ rp.sec_mode = 4;
+ else
+ rp.sec_mode = 2;
+
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
+ memcpy(rp.features, hdev->features, 8);
+ memcpy(rp.dev_class, hdev->dev_class, 3);
+ put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.hci_ver = hdev->hci_ver;
+ put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
+
+ memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
}
-static u32 get_current_settings(struct hci_dev *hdev)
+static void mgmt_pending_free_worker(struct work_struct *work)
{
- u32 settings = 0;
+ struct mgmt_pending_free_work *free_work =
+ container_of(work, struct mgmt_pending_free_work, work);
- if (hdev_is_powered(hdev))
- settings |= MGMT_SETTING_POWERED;
+ BT_DBG("sk %p", free_work->sk);
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- settings |= MGMT_SETTING_CONNECTABLE;
-
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- settings |= MGMT_SETTING_DISCOVERABLE;
-
- if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
- settings |= MGMT_SETTING_PAIRABLE;
-
- if (!(hdev->features[4] & LMP_NO_BREDR))
- settings |= MGMT_SETTING_BREDR;
-
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- settings |= MGMT_SETTING_LE;
-
- if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
- settings |= MGMT_SETTING_LINK_SECURITY;
-
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- settings |= MGMT_SETTING_SSP;
-
- if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
- settings |= MGMT_SETTING_HS;
-
- return settings;
+ sock_put(free_work->sk);
+ kfree(free_work);
}
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+ struct mgmt_pending_free_work *free_work;
+ struct sock *sk = cmd->sk;
+
+ BT_DBG("opcode %d, sk %p", cmd->opcode, sk);
+
+ kfree(cmd->param);
+ kfree(cmd);
+
+ free_work = kzalloc(sizeof(*free_work), GFP_ATOMIC);
+ if (free_work) {
+ INIT_WORK(&free_work->work, mgmt_pending_free_worker);
+ free_work->sk = sk;
+
+ if (!schedule_work(&free_work->work))
+ kfree(free_work);
+ }
+}
+
+static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ u16 index, void *data, u16 len)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("%d", opcode);
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ cmd->opcode = opcode;
+ cmd->index = index;
+
+ cmd->param = kmalloc(len, GFP_ATOMIC);
+ if (!cmd->param) {
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (data)
+ memcpy(cmd->param, data, len);
+
+ cmd->sk = sk;
+ sock_hold(sk);
+
+ list_add(&cmd->list, &cmd_list);
+
+ return cmd;
+}
+
+static void mgmt_pending_foreach(u16 opcode, int index,
+ void (*cb)(struct pending_cmd *cmd, void *data),
+ void *data)
+{
+ struct list_head *p, *n;
+
+ BT_DBG(" %d", opcode);
+
+ list_for_each_safe(p, n, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (opcode > 0 && cmd->opcode != opcode)
+ continue;
+
+ if (index >= 0 && cmd->index != index)
+ continue;
+
+ cb(cmd, data);
+ }
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+{
+ struct list_head *p;
+
+ BT_DBG(" %d", opcode);
+
+ list_for_each(p, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (cmd->opcode != opcode)
+ continue;
+
+ if (index >= 0 && cmd->index != index)
+ continue;
+
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void mgmt_pending_remove(struct pending_cmd *cmd)
+{
+ BT_DBG(" %d", cmd->opcode);
+
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
+
+static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ int err, up;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ up = test_bit(HCI_UP, &hdev->flags);
+ if ((cp->val && up) || (!cp->val && !up)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val)
+ queue_work(hdev->workqueue, &hdev->power_on);
+ else
+ queue_work(hdev->workqueue, &hdev->power_off);
+
+ err = 0;
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+ return err;
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct list_head *p;
+ u8 val = 0;
+
+ list_for_each(p, &hdev->uuids) {
+ struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+
+ val |= uuid->svc_hint;
+ }
+
+ return val;
+}
+
+static int update_class(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static int set_limited_discoverable(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ struct hci_cp_write_current_iac_lap dcp;
+ int update_cod;
+ int err = 0;
+ /* General Inquiry LAP: 0x9E8B33, Limited Inquiry LAP: 0x9E8B00 */
+ u8 lap[] = { 0x33, 0x8b, 0x9e, 0x00, 0x8b, 0x9e };
+
+ cp = (void *) data;
+
+ BT_DBG("hci%u discoverable: %d", index, cp->val);
+
+ if (!cp || len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
+ ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LIMIT_DISCOVERABLE, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
+ EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
+ EALREADY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LIMIT_DISCOVERABLE, index, data,
+ len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ memset(&dcp, 0, sizeof(dcp));
+ dcp.num_current_iac = cp->val ? 2 : 1;
+ memcpy(&dcp.lap, lap, dcp.num_current_iac * 3);
+ update_cod = 1;
+
+ if (cp->val) {
+ if (hdev->major_class & MGMT_MAJOR_CLASS_LIMITED)
+ update_cod = 0;
+ hdev->major_class |= MGMT_MAJOR_CLASS_LIMITED;
+ } else {
+ if (!(hdev->major_class & MGMT_MAJOR_CLASS_LIMITED))
+ update_cod = 0;
+ hdev->major_class &= ~MGMT_MAJOR_CLASS_LIMITED;
+ }
+
+ if (update_cod)
+ err = update_class(hdev);
+
+ if (err >= 0)
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
+ sizeof(dcp), &dcp);
+
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ u8 scan;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ scan = SCAN_PAGE;
+
+ if (cp->val)
+ scan |= SCAN_INQUIRY;
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ u8 scan;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val)
+ scan = SCAN_PAGE;
+ else
+ scan = 0;
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
+ struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ BT_DBG("hci%d %d", index, event);
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(event);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(data_len);
+
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ hci_send_to_sock(NULL, skb, skip_sk);
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
+{
+ struct mgmt_mode rp;
+
+ rp.val = val;
+
+ return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
+}
+
+static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_mode *cp, ev;
+ struct hci_dev *hdev;
+ int err;
+
+ cp = (void *) data;
+
+ BT_DBG("request for hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (cp->val)
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+ else
+ clear_bit(HCI_PAIRABLE, &hdev->flags);
+
+ err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
+ if (err < 0)
+ goto failed;
+
+ ev.val = cp->val;
+
+ err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+
#define PNP_INFO_SVCLASS_ID 0x1200
static u8 bluetooth_base_uuid[] = {
@@ -457,10 +761,10 @@
u16 eir_len = 0;
u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
int i, truncated = 0;
- struct bt_uuid *uuid;
+ struct list_head *p;
size_t name_len;
- name_len = strlen(hdev->dev_name);
+ name_len = strnlen(hdev->dev_name, HCI_MAX_EIR_LENGTH);
if (name_len > 0) {
/* EIR Data type */
@@ -482,7 +786,8 @@
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
- list_for_each_entry(uuid, &hdev->uuids, list) {
+ list_for_each(p, &hdev->uuids) {
+ struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
u16 uuid16;
uuid16 = get_uuid16(uuid->uuid);
@@ -535,16 +840,13 @@
{
struct hci_cp_write_eir cp;
- if (!hdev_is_powered(hdev))
- return 0;
-
if (!(hdev->features[6] & LMP_EXT_INQ))
return 0;
- if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ if (hdev->ssp_mode == 0)
return 0;
- if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
return 0;
memset(&cp, 0, sizeof(cp));
@@ -559,700 +861,25 @@
return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
-static u8 get_service_classes(struct hci_dev *hdev)
+static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
- struct bt_uuid *uuid;
- u8 val = 0;
-
- list_for_each_entry(uuid, &hdev->uuids, list)
- val |= uuid->svc_hint;
-
- return val;
-}
-
-static int update_class(struct hci_dev *hdev)
-{
- u8 cod[3];
- int err;
-
- BT_DBG("%s", hdev->name);
-
- if (!hdev_is_powered(hdev))
- return 0;
-
- if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
- return 0;
-
- cod[0] = hdev->minor_class;
- cod[1] = hdev->major_class;
- cod[2] = get_service_classes(hdev);
-
- if (memcmp(cod, hdev->dev_class, 3) == 0)
- return 0;
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
- if (err == 0)
- set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
-
- return err;
-}
-
-static void service_cache_off(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- service_cache.work);
-
- if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
- return;
-
- hci_dev_lock(hdev);
-
- update_eir(hdev);
- update_class(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
-{
- if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
- return;
-
- INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
-
- /* Non-mgmt controlled devices get this bit set
- * implicitly so that pairing works for them, however
- * for mgmt we require user-space to explicitly enable
- * it
- */
- clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
-}
-
-static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 data_len)
-{
- struct mgmt_rp_read_info rp;
-
- BT_DBG("sock %p %s", sk, hdev->name);
-
- hci_dev_lock(hdev);
-
- memset(&rp, 0, sizeof(rp));
-
- bacpy(&rp.bdaddr, &hdev->bdaddr);
-
- rp.version = hdev->hci_ver;
-
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
-
- rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
- rp.current_settings = cpu_to_le32(get_current_settings(hdev));
-
- memcpy(rp.dev_class, hdev->dev_class, 3);
-
- memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
- memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
-
- hci_dev_unlock(hdev);
-
- return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
- sizeof(rp));
-}
-
-static void mgmt_pending_free(struct pending_cmd *cmd)
-{
- sock_put(cmd->sk);
- kfree(cmd->param);
- kfree(cmd);
-}
-
-static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct pending_cmd *cmd;
-
- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
- return NULL;
-
- cmd->opcode = opcode;
- cmd->index = hdev->id;
-
- cmd->param = kmalloc(len, GFP_ATOMIC);
- if (!cmd->param) {
- kfree(cmd);
- return NULL;
- }
-
- if (data)
- memcpy(cmd->param, data, len);
-
- cmd->sk = sk;
- sock_hold(sk);
-
- list_add(&cmd->list, &hdev->mgmt_pending);
-
- return cmd;
-}
-
-static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
- void (*cb)(struct pending_cmd *cmd, void *data),
- void *data)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &hdev->mgmt_pending) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
- if (opcode > 0 && cmd->opcode != opcode)
- continue;
-
- cb(cmd, data);
- }
-}
-
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
-{
- struct pending_cmd *cmd;
-
- list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
- if (cmd->opcode == opcode)
- return cmd;
- }
-
- return NULL;
-}
-
-static void mgmt_pending_remove(struct pending_cmd *cmd)
-{
- list_del(&cmd->list);
- mgmt_pending_free(cmd);
-}
-
-static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
-{
- __le32 settings = cpu_to_le32(get_current_settings(hdev));
-
- return cmd_complete(sk, hdev->id, opcode, 0, &settings,
- sizeof(settings));
-}
-
-static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
- int err;
-
- BT_DBG("request for %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
- cancel_delayed_work(&hdev->power_off);
-
- if (cp->val) {
- err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
- mgmt_powered(hdev, 1);
- goto failed;
- }
- }
-
- if (!!cp->val == hdev_is_powered(hdev)) {
- err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- if (cp->val)
- schedule_work(&hdev->power_on);
- else
- schedule_work(&hdev->power_off.work);
-
- err = 0;
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
- struct sock *skip_sk)
-{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
-
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- if (hdev)
- hdr->index = cpu_to_le16(hdev->id);
- else
- hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
- hdr->len = cpu_to_le16(data_len);
-
- if (data)
- memcpy(skb_put(skb, data_len), data, data_len);
-
- /* Time stamp */
- __net_timestamp(skb);
-
- hci_send_to_control(skb, skip_sk);
- kfree_skb(skb);
-
- return 0;
-}
-
-static int new_settings(struct hci_dev *hdev, struct sock *skip)
-{
- __le32 ev;
-
- ev = cpu_to_le32(get_current_settings(hdev));
-
- return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
-}
-
-static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_set_discoverable *cp = data;
- struct pending_cmd *cmd;
- u16 timeout;
- u8 scan;
- int err;
-
- BT_DBG("request for %s", hdev->name);
-
- timeout = get_unaligned_le16(&cp->timeout);
- if (!cp->val && timeout > 0)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_INVALID_PARAMS);
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev) && timeout > 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_NOT_POWERED);
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_REJECTED);
- goto failed;
- }
-
- if (!hdev_is_powered(hdev)) {
- bool changed = false;
-
- if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
- change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- changed = true;
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
- if (err < 0)
- goto failed;
-
- if (changed)
- err = new_settings(hdev, sk);
-
- goto failed;
- }
-
- if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
- if (hdev->discov_timeout > 0) {
- cancel_delayed_work(&hdev->discov_off);
- hdev->discov_timeout = 0;
- }
-
- if (cp->val && timeout > 0) {
- hdev->discov_timeout = timeout;
- queue_delayed_work(hdev->workqueue, &hdev->discov_off,
- msecs_to_jiffies(hdev->discov_timeout * 1000));
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- scan = SCAN_PAGE;
-
- if (cp->val)
- scan |= SCAN_INQUIRY;
- else
- cancel_delayed_work(&hdev->discov_off);
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
- if (cp->val)
- hdev->discov_timeout = timeout;
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
- u8 scan;
- int err;
-
- BT_DBG("request for %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- bool changed = false;
-
- if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- changed = true;
-
- if (cp->val) {
- set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
- } else {
- clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
- if (err < 0)
- goto failed;
-
- if (changed)
- err = new_settings(hdev, sk);
-
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
- if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
- err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- if (cp->val) {
- scan = SCAN_PAGE;
- } else {
- scan = 0;
-
- if (test_bit(HCI_ISCAN, &hdev->flags) &&
- hdev->discov_timeout > 0)
- cancel_delayed_work(&hdev->discov_off);
- }
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_mode *cp = data;
- int err;
-
- BT_DBG("request for %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (cp->val)
- set_bit(HCI_PAIRABLE, &hdev->dev_flags);
- else
- clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
-
- err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
- if (err < 0)
- goto failed;
-
- err = new_settings(hdev, sk);
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
- u8 val;
- int err;
-
- BT_DBG("request for %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- bool changed = false;
-
- if (!!cp->val != test_bit(HCI_LINK_SECURITY,
- &hdev->dev_flags)) {
- change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
- changed = true;
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
- if (err < 0)
- goto failed;
-
- if (changed)
- err = new_settings(hdev, sk);
-
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
- val = !!cp->val;
-
- if (test_bit(HCI_AUTH, &hdev->flags) == val) {
- err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
- if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
- }
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
-{
- struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
- u8 val;
- int err;
-
- BT_DBG("request for %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_NOT_SUPPORTED);
- goto failed;
- }
-
- val = !!cp->val;
-
- if (!hdev_is_powered(hdev)) {
- bool changed = false;
-
- if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
- change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
- changed = true;
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
- if (err < 0)
- goto failed;
-
- if (changed)
- err = new_settings(hdev, sk);
-
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
- err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
- if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
- }
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
-{
- struct mgmt_mode *cp = data;
-
- BT_DBG("request for %s", hdev->name);
-
- if (!enable_hs)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_NOT_SUPPORTED);
-
- if (cp->val)
- set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
- else
- clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
-
- return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
-}
-
-static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
-{
- struct mgmt_mode *cp = data;
- struct hci_cp_write_le_host_supported hci_cp;
- struct pending_cmd *cmd;
- int err;
- u8 val, enabled;
-
- BT_DBG("request for %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!enable_le || !(hdev->features[4] & LMP_LE)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
- MGMT_STATUS_NOT_SUPPORTED);
- goto unlock;
- }
-
- val = !!cp->val;
- enabled = !!(hdev->host_features[0] & LMP_HOST_LE);
-
- if (!hdev_is_powered(hdev) || val == enabled) {
- bool changed = false;
-
- if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
- change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
- changed = true;
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
- if (err < 0)
- goto unlock;
-
- if (changed)
- err = new_settings(hdev, sk);
-
- goto unlock;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
-
- memset(&hci_cp, 0, sizeof(hci_cp));
-
- if (val) {
- hci_cp.le = val;
- hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
- }
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
- &hci_cp);
- if (err < 0) {
- mgmt_pending_remove(cmd);
- goto unlock;
- }
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
-{
- struct mgmt_cp_add_uuid *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_cp_add_uuid *cp;
+ struct hci_dev *hdev;
struct bt_uuid *uuid;
int err;
- BT_DBG("request for %s", hdev->name);
+ cp = (void *) data;
- hci_dev_lock(hdev);
+ BT_DBG("request for hci%u", index);
- if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
- MGMT_STATUS_BUSY);
- goto failed;
- }
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+
+ hci_dev_lock_bh(hdev);
uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
if (!uuid) {
@@ -1265,73 +892,51 @@
list_add(&uuid->list, &hdev->uuids);
- err = update_class(hdev);
- if (err < 0)
- goto failed;
+ if (test_bit(HCI_UP, &hdev->flags)) {
- err = update_eir(hdev);
- if (err < 0)
- goto failed;
+ err = update_class(hdev);
+ if (err < 0)
+ goto failed;
- if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
- hdev->dev_class, 3);
- goto failed;
- }
+ err = update_eir(hdev);
+ if (err < 0)
+ goto failed;
+ } else
+ err = 0;
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
+ err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
failed:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static bool enable_service_cache(struct hci_dev *hdev)
+static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
- if (!hdev_is_powered(hdev))
- return false;
-
- if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
- schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
- return true;
- }
-
- return false;
-}
-
-static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_remove_uuid *cp = data;
- struct pending_cmd *cmd;
struct list_head *p, *n;
+ struct mgmt_cp_remove_uuid *cp;
+ struct hci_dev *hdev;
u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int err, found;
- BT_DBG("request for %s", hdev->name);
+ cp = (void *) data;
- hci_dev_lock(hdev);
+ BT_DBG("request for hci%u", index);
- if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+
+ hci_dev_lock_bh(hdev);
if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
err = hci_uuids_clear(hdev);
-
- if (enable_service_cache(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
- 0, hdev->dev_class, 3);
- goto unlock;
- }
-
- goto update_class;
+ goto unlock;
}
found = 0;
@@ -1347,222 +952,232 @@
}
if (found == 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_INVALID_PARAMS);
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
goto unlock;
}
-update_class:
- err = update_class(hdev);
- if (err < 0)
- goto unlock;
+ if (test_bit(HCI_UP, &hdev->flags)) {
+ err = update_class(hdev);
+ if (err < 0)
+ goto unlock;
- err = update_eir(hdev);
- if (err < 0)
- goto unlock;
+ err = update_eir(hdev);
+ if (err < 0)
+ goto unlock;
+ } else
+ err = 0;
- if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
unlock:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct mgmt_cp_set_dev_class *cp = data;
- struct pending_cmd *cmd;
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_dev_class *cp;
int err;
- BT_DBG("request for %s", hdev->name);
+ cp = (void *) data;
- hci_dev_lock(hdev);
+ BT_DBG("request for hci%u", index);
- if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
- hdev->major_class = cp->major;
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ hdev->major_class &= ~MGMT_MAJOR_CLASS_MASK;
+ hdev->major_class |= cp->major & MGMT_MAJOR_CLASS_MASK;
hdev->minor_class = cp->minor;
- if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
+ if (test_bit(HCI_UP, &hdev->flags))
+ err = update_class(hdev);
+ else
+ err = 0;
- if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
- hci_dev_unlock(hdev);
- cancel_delayed_work_sync(&hdev->service_cache);
- hci_dev_lock(hdev);
- update_eir(hdev);
- }
+ if (err == 0)
+ err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
- err = update_class(hdev);
- if (err < 0)
- goto unlock;
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
- if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
-
-unlock:
- hci_dev_unlock(hdev);
return err;
}
-static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct mgmt_cp_load_link_keys *cp = data;
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_service_cache *cp;
+ int err;
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ BT_DBG("hci%u enable %d", index, cp->enable);
+
+ if (cp->enable) {
+ set_bit(HCI_SERVICE_CACHE, &hdev->flags);
+ err = 0;
+ } else {
+ clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
+ if (test_bit(HCI_UP, &hdev->flags)) {
+ err = update_class(hdev);
+ if (err == 0)
+ err = update_eir(hdev);
+ } else
+ err = 0;
+ }
+
+ if (err == 0)
+ err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
+ 0);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_load_keys *cp;
u16 key_count, expected_len;
- int i;
+ int i, err;
+
+ cp = (void *) data;
+
+ if (len < sizeof(*cp))
+ return -EINVAL;
key_count = get_unaligned_le16(&cp->key_count);
- expected_len = sizeof(*cp) + key_count *
- sizeof(struct mgmt_link_key_info);
- if (expected_len != len) {
- BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
- len, expected_len);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+ if (expected_len > len) {
+ BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
+ expected_len, len);
+ return -EINVAL;
}
- BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+
+ BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
key_count);
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
hci_link_keys_clear(hdev);
- set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
+ set_bit(HCI_LINK_KEYS, &hdev->flags);
if (cp->debug_keys)
- set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+ set_bit(HCI_DEBUG_KEYS, &hdev->flags);
else
- clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+ clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
- for (i = 0; i < key_count; i++) {
- struct mgmt_link_key_info *key = &cp->keys[i];
+ len -= sizeof(*cp);
+ i = 0;
- hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
- key->type, key->pin_len);
+ while (i < len) {
+ struct mgmt_key_info *key = (void *) cp->keys + i;
+
+ i += sizeof(*key);
+
+ if (key->key_type == KEY_TYPE_LTK) {
+ struct key_master_id *id = (void *) key->data;
+
+ if (key->dlen != sizeof(struct key_master_id))
+ continue;
+
+ hci_add_ltk(hdev, 0, &key->bdaddr, key->addr_type,
+ key->pin_len, key->auth, id->ediv,
+ id->rand, key->val);
+
+ continue;
+ }
+
+ hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->key_type,
+ key->pin_len);
}
- cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
+ err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
- return 0;
-}
-
-static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 addr_type, struct sock *skip_sk)
-{
- struct mgmt_ev_device_unpaired ev;
-
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = addr_type;
-
- return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
- skip_sk);
-}
-
-static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_unpair_device *cp = data;
- struct mgmt_rp_unpair_device rp;
- struct hci_cp_disconnect dc;
- struct pending_cmd *cmd;
- struct hci_conn *conn;
- int err;
-
- hci_dev_lock(hdev);
-
- memset(&rp, 0, sizeof(rp));
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
-
- if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
- MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
- goto unlock;
- }
-
- if (cp->addr.type == MGMT_ADDR_BREDR)
- err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
- else
- err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
-
- if (err < 0) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
- MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
- goto unlock;
- }
-
- if (cp->disconnect) {
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
- else
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
- &cp->addr.bdaddr);
- } else {
- conn = NULL;
- }
-
- if (!conn) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
- &rp, sizeof(rp));
- device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
- sizeof(*cp));
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
-
- put_unaligned_le16(conn->handle, &dc.handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
- err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-unlock:
- hci_dev_unlock(hdev);
return err;
}
-static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
- struct mgmt_cp_disconnect *cp = data;
+ struct hci_dev *hdev;
+ struct mgmt_cp_remove_key *cp;
+ struct hci_conn *conn;
+ int err;
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ err = hci_remove_link_key(hdev, &cp->bdaddr);
+ if (err < 0) {
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+ goto unlock;
+ }
+
+ err = 0;
+
+ if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn) {
+ struct hci_cp_disconnect dc;
+
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
+ }
+
+unlock:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_disconnect *cp;
struct hci_cp_disconnect dc;
struct pending_cmd *cmd;
struct hci_conn *conn;
@@ -1570,32 +1185,38 @@
BT_DBG("");
- hci_dev_lock(hdev);
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+
+ hci_dev_lock_bh(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_POWERED);
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_BUSY);
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
goto failed;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
- else
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
-
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (!conn) {
- err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_CONNECTED);
- goto failed;
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ ENOTCONN);
+ goto failed;
+ }
}
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1609,218 +1230,266 @@
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static u8 link_to_mgmt(u8 link_type, u8 addr_type)
-{
- switch (link_type) {
- case LE_LINK:
- switch (addr_type) {
- case ADDR_LE_DEV_PUBLIC:
- return MGMT_ADDR_LE_PUBLIC;
- case ADDR_LE_DEV_RANDOM:
- return MGMT_ADDR_LE_RANDOM;
- default:
- return MGMT_ADDR_INVALID;
- }
- case ACL_LINK:
- return MGMT_ADDR_BREDR;
- default:
- return MGMT_ADDR_INVALID;
- }
-}
-
-static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 data_len)
+static int get_connections(struct sock *sk, u16 index)
{
struct mgmt_rp_get_connections *rp;
- struct hci_conn *c;
+ struct hci_dev *hdev;
+ struct list_head *p;
size_t rp_len;
- int err;
- u16 i;
+ u16 count;
+ int i, err;
BT_DBG("");
- hci_dev_lock(hdev);
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
- MGMT_STATUS_NOT_POWERED);
- goto unlock;
+ hci_dev_lock_bh(hdev);
+
+ count = 0;
+ list_for_each(p, &hdev->conn_hash.list) {
+ count++;
}
- i = 0;
- list_for_each_entry(c, &hdev->conn_hash.list, list) {
- if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
- i++;
- }
-
- rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+ rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
err = -ENOMEM;
goto unlock;
}
+ put_unaligned_le16(count, &rp->conn_count);
+
+ read_lock(&hci_dev_list_lock);
+
i = 0;
- list_for_each_entry(c, &hdev->conn_hash.list, list) {
- if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
- continue;
- bacpy(&rp->addr[i].bdaddr, &c->dst);
- rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
- if (rp->addr[i].type == MGMT_ADDR_INVALID)
- continue;
- i++;
+ list_for_each(p, &hdev->conn_hash.list) {
+ struct hci_conn *c = list_entry(p, struct hci_conn, list);
+
+ bacpy(&rp->conn[i++], &c->dst);
}
- put_unaligned_le16(i, &rp->conn_count);
+ read_unlock(&hci_dev_list_lock);
- /* Recalculate length in case of filtered SCO connections, etc */
- rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
-
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
- rp_len);
-
- kfree(rp);
+ err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
unlock:
- hci_dev_unlock(hdev);
+ kfree(rp);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
return err;
}
-static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
- struct mgmt_cp_pin_code_neg_reply *cp)
+static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct pending_cmd *cmd;
- int err;
-
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
- sizeof(*cp));
- if (!cmd)
- return -ENOMEM;
-
- err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
- sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
- return err;
-}
-
-static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct hci_conn *conn;
- struct mgmt_cp_pin_code_reply *cp = data;
+ struct hci_dev *hdev;
+ struct mgmt_cp_pin_code_reply *cp;
struct hci_cp_pin_code_reply reply;
struct pending_cmd *cmd;
int err;
BT_DBG("");
- hci_dev_lock(hdev);
+ cp = (void *) data;
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_NOT_POWERED);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
goto failed;
}
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
- if (!conn) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_NOT_CONNECTED);
- goto failed;
- }
-
- if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
- struct mgmt_cp_pin_code_neg_reply ncp;
-
- memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
-
- BT_ERR("PIN code is not 16 bytes long");
-
- err = send_pin_code_neg_reply(sk, hdev, &ncp);
- if (err >= 0)
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- bacpy(&reply.bdaddr, &cp->addr.bdaddr);
+ bacpy(&reply.bdaddr, &cp->bdaddr);
reply.pin_len = cp->pin_len;
- memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
+ memcpy(reply.pin_code, cp->pin_code, 16);
err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
+static int encrypt_link(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct mgmt_cp_pin_code_neg_reply *cp = data;
+ struct hci_dev *hdev;
+ struct mgmt_cp_encrypt_link *cp;
+ struct hci_cp_set_conn_encrypt enc;
+ struct hci_conn *conn;
+ int err = 0;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENETDOWN);
+ goto done;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENOTCONN);
+ goto done;
+ }
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, EINPROGRESS);
+ goto done;
+ }
+
+ if (conn->link_mode & HCI_LM_AUTH) {
+ enc.handle = cpu_to_le16(conn->handle);
+ enc.encrypt = cp->enable;
+ err = hci_send_cmd(hdev,
+ HCI_OP_SET_CONN_ENCRYPT, sizeof(enc), &enc);
+ } else {
+ conn->auth_initiator = 1;
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ err = hci_send_cmd(conn->hdev,
+ HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+ }
+ }
+
+done:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+
+static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pin_code_neg_reply *cp;
+ struct pending_cmd *cmd;
int err;
BT_DBG("");
- hci_dev_lock(hdev);
+ cp = (void *) data;
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_STATUS_NOT_POWERED);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+ ENETDOWN);
goto failed;
}
- err = send_pin_code_neg_reply(sk, hdev, cp);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
+ data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
+ &cp->bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct mgmt_cp_set_io_capability *cp = data;
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_io_capability *cp;
BT_DBG("");
- hci_dev_lock(hdev);
+ cp = (void *) data;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
hdev->io_capability);
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
- return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
- 0);
+ return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
}
static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- struct pending_cmd *cmd;
+ struct list_head *p;
- list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ list_for_each(p, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
continue;
+ if (cmd->index != hdev->id)
+ continue;
+
if (cmd->user_data != conn)
continue;
@@ -1835,19 +1504,18 @@
struct mgmt_rp_pair_device rp;
struct hci_conn *conn = cmd->user_data;
- bacpy(&rp.addr.bdaddr, &conn->dst);
- rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
+ BT_DBG(" %u", status);
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
- &rp, sizeof(rp));
+ bacpy(&rp.bdaddr, &conn->dst);
+ rp.status = status;
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
/* So we don't get further callbacks for this connection */
conn->connect_cfm_cb = NULL;
conn->security_cfm_cb = NULL;
conn->disconn_cfm_cb = NULL;
- hci_conn_put(conn);
-
mgmt_pending_remove(cmd);
}
@@ -1855,80 +1523,141 @@
{
struct pending_cmd *cmd;
- BT_DBG("status %u", status);
+ BT_DBG(" %u", status);
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- pairing_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ pairing_complete(cmd, status);
+ hci_conn_put(conn);
}
-static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static void pairing_security_complete_cb(struct hci_conn *conn, u8 status)
{
- struct mgmt_cp_pair_device *cp = data;
- struct mgmt_rp_pair_device rp;
struct pending_cmd *cmd;
- u8 sec_level, auth_type;
+
+ BT_DBG(" %u", status);
+
+ cmd = find_pairing(conn);
+ if (!cmd) {
+ BT_DBG("Unable to find a pending command");
+ return;
+ }
+
+ if (conn->type == LE_LINK)
+ smp_link_encrypt_cmplt(conn->l2cap_data, status,
+ status ? 0 : 1);
+ else
+ pairing_complete(cmd, status);
+}
+
+static void pairing_connect_complete_cb(struct hci_conn *conn, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("conn: %p %u", conn, status);
+
+ cmd = find_pairing(conn);
+ if (!cmd) {
+ BT_DBG("Unable to find a pending command");
+ return;
+ }
+
+ if (status)
+ pairing_complete(cmd, status);
+
+ hci_conn_put(conn);
+}
+
+static void discovery_terminated(struct pending_cmd *cmd, void *data)
+{
+ struct hci_dev *hdev;
+ struct mgmt_mode ev = {0};
+
+ BT_DBG("");
+ hdev = hci_dev_get(cmd->index);
+ if (!hdev)
+ goto not_found;
+
+ del_timer(&hdev->disco_le_timer);
+ del_timer(&hdev->disco_timer);
+ hci_dev_put(hdev);
+
+not_found:
+ mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);
+
+ list_del(&cmd->list);
+
+ mgmt_pending_free(cmd);
+}
+
+static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pair_device *cp;
+ struct pending_cmd *cmd;
+ u8 sec_level, auth_type, io_cap;
struct hci_conn *conn;
+ struct adv_entry *entry;
int err;
BT_DBG("");
- hci_dev_lock(hdev);
+ cp = (void *) data;
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_NOT_POWERED);
- goto unlock;
- }
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+
+ hdev = hci_dev_get(index);
+
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ io_cap = cp->io_cap;
sec_level = BT_SECURITY_MEDIUM;
- if (cp->io_cap == 0x03)
- auth_type = HCI_AT_DEDICATED_BONDING;
- else
- auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+ auth_type = HCI_AT_DEDICATED_BONDING;
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_connect(hdev, ACL_LINK, 0, &cp->addr.bdaddr,
- sec_level, auth_type);
- else
- conn = hci_connect(hdev, LE_LINK, 0, &cp->addr.bdaddr,
- sec_level, auth_type);
-
- memset(&rp, 0, sizeof(rp));
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
+ entry = hci_find_adv_entry(hdev, &cp->bdaddr);
+ if (entry && entry->flags & 0x04) {
+ conn = hci_le_connect(hdev, 0, &cp->bdaddr, sec_level,
+ auth_type, NULL);
+ } else {
+ /* ACL-SSP does not support io_cap 0x04 (KeyboadDisplay) */
+ if (io_cap == 0x04)
+ io_cap = 0x01;
+ conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level,
+ auth_type);
+ conn->auth_initiator = 1;
+ }
if (IS_ERR(conn)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_CONNECT_FAILED, &rp,
- sizeof(rp));
+ err = PTR_ERR(conn);
goto unlock;
}
if (conn->connect_cfm_cb) {
hci_conn_put(conn);
- err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
if (!cmd) {
err = -ENOMEM;
hci_conn_put(conn);
goto unlock;
}
- /* For LE, just connecting isn't a proof that the pairing finished */
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn->connect_cfm_cb = pairing_complete_cb;
-
- conn->security_cfm_cb = pairing_complete_cb;
+ conn->connect_cfm_cb = pairing_connect_complete_cb;
+ conn->security_cfm_cb = pairing_security_complete_cb;
conn->disconn_cfm_cb = pairing_complete_cb;
- conn->io_capability = cp->io_cap;
+ conn->io_capability = io_cap;
cmd->user_data = conn;
if (conn->state == BT_CONNECTED &&
@@ -1938,249 +1667,581 @@
err = 0;
unlock:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
+ u16 len, u16 opcode)
{
- struct mgmt_addr_info *addr = data;
+ struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+ u16 mgmt_op = opcode, hci_op;
struct pending_cmd *cmd;
- struct hci_conn *conn;
+ struct hci_dev *hdev;
+ struct hci_conn *le_conn;
int err;
- BT_DBG("");
+ BT_DBG("%d", mgmt_op);
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_STATUS_NOT_POWERED);
- goto unlock;
- }
-
- cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
- if (!cmd) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
- goto unlock;
- }
-
- conn = cmd->user_data;
-
- if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
- goto unlock;
- }
-
- pairing_complete(cmd, MGMT_STATUS_CANCELLED);
-
- err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
- addr, sizeof(*addr));
-unlock:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
- u16 hci_op, __le32 passkey)
-{
- struct pending_cmd *cmd;
- struct hci_conn *conn;
- int err;
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, mgmt_op,
- MGMT_STATUS_NOT_POWERED);
- goto done;
- }
-
- if (type == MGMT_ADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+ if (mgmt_op == MGMT_OP_USER_CONFIRM_NEG_REPLY)
+ hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
else
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+ hci_op = HCI_OP_USER_CONFIRM_REPLY;
- if (!conn) {
- err = cmd_status(sk, hdev->id, mgmt_op,
- MGMT_STATUS_NOT_CONNECTED);
+ if (len < sizeof(*cp))
+ return cmd_status(sk, index, mgmt_op, EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, mgmt_op, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, mgmt_op, ENETDOWN);
goto done;
}
- if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
- /* Continue with pairing via SMP */
- err = smp_user_confirm_reply(conn, mgmt_op, passkey);
-
- if (!err)
- err = cmd_status(sk, hdev->id, mgmt_op,
- MGMT_STATUS_SUCCESS);
- else
- err = cmd_status(sk, hdev->id, mgmt_op,
- MGMT_STATUS_FAILED);
-
+ le_conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+ if (le_conn) {
+ err = le_user_confirm_reply(le_conn, mgmt_op, (void *) cp);
goto done;
}
+ BT_DBG("BR/EDR: %s", mgmt_op == MGMT_OP_USER_CONFIRM_NEG_REPLY ?
+ "Reject" : "Accept");
- cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
+ cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
if (!cmd) {
err = -ENOMEM;
goto done;
}
- /* Continue with pairing via HCI */
- if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
- struct hci_cp_user_passkey_reply cp;
-
- bacpy(&cp.bdaddr, bdaddr);
- cp.passkey = passkey;
- err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
- } else
- err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
-
+ err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
if (err < 0)
mgmt_pending_remove(cmd);
done:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+static int resolve_name(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct mgmt_cp_user_confirm_reply *cp = data;
-
- BT_DBG("");
-
- if (len != sizeof(*cp))
- return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
-
- return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
- MGMT_OP_USER_CONFIRM_REPLY,
- HCI_OP_USER_CONFIRM_REPLY, 0);
-}
-
-static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
-{
- struct mgmt_cp_user_confirm_neg_reply *cp = data;
-
- BT_DBG("");
-
- return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
- MGMT_OP_USER_CONFIRM_NEG_REPLY,
- HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
-}
-
-static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_user_passkey_reply *cp = data;
-
- BT_DBG("");
-
- return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
- MGMT_OP_USER_PASSKEY_REPLY,
- HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
-}
-
-static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
-{
- struct mgmt_cp_user_passkey_neg_reply *cp = data;
-
- BT_DBG("");
-
- return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
- MGMT_OP_USER_PASSKEY_NEG_REPLY,
- HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
-}
-
-static int update_name(struct hci_dev *hdev, const char *name)
-{
- struct hci_cp_write_local_name cp;
-
- memcpy(cp.name, name, sizeof(cp.name));
-
- return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
-}
-
-static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_set_local_name *cp = data;
+ struct mgmt_cp_resolve_name *mgmt_cp = (void *) data;
+ struct hci_cp_remote_name_req hci_cp;
+ struct hci_dev *hdev;
struct pending_cmd *cmd;
int err;
BT_DBG("");
- hci_dev_lock(hdev);
+ if (len != sizeof(*mgmt_cp))
+ return cmd_status(sk, index, MGMT_OP_RESOLVE_NAME, EINVAL);
- memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_RESOLVE_NAME, ENODEV);
- if (!hdev_is_powered(hdev)) {
- memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+ hci_dev_lock_bh(hdev);
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
- data, len);
- if (err < 0)
- goto failed;
-
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
- sk);
-
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_RESOLVE_NAME, index, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- err = update_name(hdev, cp->name);
+ memset(&hci_cp, 0, sizeof(hci_cp));
+ bacpy(&hci_cp.bdaddr, &mgmt_cp->bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(hci_cp),
+ &hci_cp);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 data_len)
+static int set_connection_params(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
{
+ struct mgmt_cp_set_connection_params *cp = (void *) data;
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
+ int err;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS,
+ ENOTCONN);
+ goto failed;
+ }
+
+ hci_le_conn_update(conn, le16_to_cpu(cp->interval_min),
+ le16_to_cpu(cp->interval_max),
+ le16_to_cpu(cp->slave_latency),
+ le16_to_cpu(cp->timeout_multiplier));
+
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS, 0);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_rssi_reporter(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
+{
+ struct mgmt_cp_set_rssi_reporter *cp = (void *) data;
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
+ int err = 0;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+
+ if (!conn) {
+ err = cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER,
+ ENOTCONN);
+ goto failed;
+ }
+
+ BT_DBG("updateOnThreshExceed %d ", cp->updateOnThreshExceed);
+ hci_conn_set_rssi_reporter(conn, cp->rssi_threshold,
+ __le16_to_cpu(cp->interval), cp->updateOnThreshExceed);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int unset_rssi_reporter(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
+{
+ struct mgmt_cp_unset_rssi_reporter *cp = (void *) data;
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
+ int err = 0;
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+
+ if (!conn) {
+ err = cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER,
+ ENOTCONN);
+ goto failed;
+ }
+
+ hci_conn_unset_rssi_reporter(conn);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
+ struct hci_cp_write_local_name hci_cp;
+ struct hci_dev *hdev;
struct pending_cmd *cmd;
int err;
- BT_DBG("%s", hdev->name);
+ BT_DBG("");
- hci_dev_lock(hdev);
+ if (len != sizeof(*mgmt_cp))
+ return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_NOT_POWERED);
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
+ &hci_cp);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static void discovery_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct mgmt_mode ev;
+
+ BT_DBG("");
+ if (cmd->opcode == MGMT_OP_START_DISCOVERY) {
+ ev.val = 1;
+ cmd_status(cmd->sk, cmd->index, MGMT_OP_START_DISCOVERY, 0);
+ } else {
+ ev.val = 0;
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_STOP_DISCOVERY,
+ NULL, 0);
+ if (cmd->opcode == MGMT_OP_STOP_DISCOVERY) {
+ struct hci_dev *hdev = hci_dev_get(cmd->index);
+ if (hdev) {
+ del_timer(&hdev->disco_le_timer);
+ del_timer(&hdev->disco_timer);
+ hci_dev_put(hdev);
+ }
+ }
+ }
+
+ mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);
+
+ list_del(&cmd->list);
+
+ mgmt_pending_free(cmd);
+}
+
+void mgmt_inquiry_started(u16 index)
+{
+ BT_DBG("");
+ mgmt_pending_foreach(MGMT_OP_START_DISCOVERY, index,
+ discovery_rsp, NULL);
+}
+
+void mgmt_inquiry_complete_evt(u16 index, u8 status)
+{
+ struct hci_dev *hdev;
+ struct hci_cp_le_set_scan_enable le_cp = {1, 0};
+ struct mgmt_mode cp = {0};
+ int err = -1;
+
+ BT_DBG("");
+
+ hdev = hci_dev_get(index);
+
+ if (!hdev || !lmp_le_capable(hdev)) {
+
+ mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
+ discovery_terminated, NULL);
+
+ mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);
+
+ if (hdev)
+ goto done;
+ else
+ return;
+ }
+
+ if (hdev->disco_state != SCAN_IDLE) {
+ err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(le_cp), &le_cp);
+ if (err >= 0) {
+ mod_timer(&hdev->disco_le_timer, jiffies +
+ msecs_to_jiffies(hdev->disco_int_phase * 1000));
+ hdev->disco_state = SCAN_LE;
+ } else
+ hdev->disco_state = SCAN_IDLE;
+ }
+
+ if (hdev->disco_state == SCAN_IDLE)
+ mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);
+
+ if (err < 0)
+ mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
+ discovery_terminated, NULL);
+
+done:
+ hci_dev_put(hdev);
+}
+
+void mgmt_disco_timeout(unsigned long data)
+{
+ struct hci_dev *hdev = (void *) data;
+ struct pending_cmd *cmd;
+ struct mgmt_mode cp = {0};
+
+ BT_DBG("hci%d", hdev->id);
+
+ hdev = hci_dev_get(hdev->id);
+
+ if (!hdev)
+ return;
+
+ hci_dev_lock_bh(hdev);
+ del_timer(&hdev->disco_le_timer);
+
+ if (hdev->disco_state != SCAN_IDLE) {
+ struct hci_cp_le_set_scan_enable le_cp = {0, 0};
+
+ if (test_bit(HCI_UP, &hdev->flags)) {
+ if (hdev->disco_state == SCAN_LE)
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(le_cp), &le_cp);
+ else
+ hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0,
+ NULL);
+ }
+ hdev->disco_state = SCAN_IDLE;
+ }
+
+ mgmt_event(MGMT_EV_DISCOVERING, hdev->id, &cp, sizeof(cp), NULL);
+
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev->id);
+ if (cmd)
+ mgmt_pending_remove(cmd);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+}
+
+void mgmt_disco_le_timeout(unsigned long data)
+{
+ struct hci_dev *hdev = (void *)data;
+ struct hci_cp_le_set_scan_enable le_cp = {0, 0};
+
+ BT_DBG("hci%d", hdev->id);
+
+ hdev = hci_dev_get(hdev->id);
+
+ if (!hdev)
+ return;
+
+ hci_dev_lock_bh(hdev);
+
+ if (test_bit(HCI_UP, &hdev->flags)) {
+ if (hdev->disco_state == SCAN_LE)
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(le_cp), &le_cp);
+
+ /* re-start BR scan */
+ if (hdev->disco_state != SCAN_IDLE) {
+ struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 4, 0};
+ hdev->disco_int_phase *= 2;
+ hdev->disco_int_count = 0;
+ cp.num_rsp = (u8) hdev->disco_int_phase;
+ hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ hdev->disco_state = SCAN_BR;
+ }
+ }
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+}
+
+static int start_discovery(struct sock *sk, u16 index)
+{
+ struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 8, 0};
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("");
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (hdev->disco_state && timer_pending(&hdev->disco_timer)) {
+ err = -EBUSY;
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* If LE Capable, we will alternate between BR/EDR and LE */
+ if (lmp_le_capable(hdev)) {
+ struct hci_cp_le_set_scan_parameters le_cp;
+
+ /* Shorten BR scan params */
+ cp.num_rsp = 1;
+ cp.length /= 2;
+
+ /* Setup LE scan params */
+ memset(&le_cp, 0, sizeof(le_cp));
+ le_cp.type = 0x01; /* Active scanning */
+ /* The recommended value for scan interval and window is
+ * 11.25 msec. It is calculated by: time = n * 0.625 msec */
+ le_cp.interval = cpu_to_le16(0x0012);
+ le_cp.window = cpu_to_le16(0x0012);
+ le_cp.own_bdaddr_type = 0; /* Public address */
+ le_cp.filter = 0; /* Accept all adv packets */
+
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAMETERS,
+ sizeof(le_cp), &le_cp);
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+ else if (lmp_le_capable(hdev)) {
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
+ if (!cmd)
+ mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index,
+ NULL, 0);
+ hdev->disco_int_phase = 1;
+ hdev->disco_int_count = 0;
+ hdev->disco_state = SCAN_BR;
+ del_timer(&hdev->disco_le_timer);
+ del_timer(&hdev->disco_timer);
+ mod_timer(&hdev->disco_timer,
+ jiffies + msecs_to_jiffies(20000));
+ }
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ if (err < 0)
+ return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, -err);
+
+ return err;
+}
+
+static int stop_discovery(struct sock *sk, u16 index)
+{
+ struct hci_cp_le_set_scan_enable le_cp = {0, 0};
+ struct mgmt_mode mode_cp = {0};
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd = NULL;
+ int err = -EPERM;
+ u8 state;
+
+ BT_DBG("");
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ state = hdev->disco_state;
+ hdev->disco_state = SCAN_IDLE;
+ del_timer(&hdev->disco_le_timer);
+ del_timer(&hdev->disco_timer);
+
+ if (state == SCAN_LE) {
+ err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(le_cp), &le_cp);
+ if (err >= 0) {
+ mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
+ discovery_terminated, NULL);
+
+ err = cmd_complete(sk, index, MGMT_OP_STOP_DISCOVERY,
+ NULL, 0);
+ }
+ }
+
+ if (err < 0)
+ err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
+ if (err < 0 && cmd)
+ mgmt_pending_remove(cmd);
+
+ mgmt_event(MGMT_EV_DISCOVERING, index, &mode_cp, sizeof(mode_cp), NULL);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ if (err < 0)
+ return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, -err);
+ else
+ return err;
+}
+
+static int read_local_oob_data(struct sock *sk, u16 index)
+{
+ struct hci_dev *hdev;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("hci%u", index);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ ENETDOWN);
goto unlock;
}
if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_NOT_SUPPORTED);
+ err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ EOPNOTSUPP);
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_BUSY);
+ if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
+ err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -2191,473 +2252,85 @@
mgmt_pending_remove(cmd);
unlock:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
return err;
}
-static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
+static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
- struct mgmt_cp_add_remote_oob_data *cp = data;
- u8 status;
+ struct hci_dev *hdev;
+ struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
int err;
- BT_DBG("%s ", hdev->name);
+ BT_DBG("hci%u ", index);
- hci_dev_lock(hdev);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ EINVAL);
- if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_NOT_POWERED, &cp->addr,
- sizeof(cp->addr));
- goto unlock;
- }
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ ENODEV);
- err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
- cp->randomizer);
+ hci_dev_lock_bh(hdev);
+
+ err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
+ cp->randomizer);
if (err < 0)
- status = MGMT_STATUS_FAILED;
+ err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
else
- status = 0;
+ err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
+ 0);
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
- &cp->addr, sizeof(cp->addr));
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
-unlock:
- hci_dev_unlock(hdev);
return err;
}
-static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
+static int remove_remote_oob_data(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
{
- struct mgmt_cp_remove_remote_oob_data *cp = data;
- u8 status;
+ struct hci_dev *hdev;
+ struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
int err;
- BT_DBG("%s", hdev->name);
+ BT_DBG("hci%u ", index);
- hci_dev_lock(hdev);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ EINVAL);
- if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- MGMT_STATUS_NOT_POWERED, &cp->addr,
- sizeof(cp->addr));
- goto unlock;
- }
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ ENODEV);
- err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
+ hci_dev_lock_bh(hdev);
+
+ err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
if (err < 0)
- status = MGMT_STATUS_INVALID_PARAMS;
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ -err);
else
- status = 0;
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ NULL, 0);
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- status, &cp->addr, sizeof(cp->addr));
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
-}
-
-int mgmt_interleaved_discovery(struct hci_dev *hdev)
-{
- int err;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
- if (err < 0)
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
return err;
}
-static int start_discovery(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
-{
- struct mgmt_cp_start_discovery *cp = data;
- struct pending_cmd *cmd;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_POWERED);
- goto failed;
- }
-
- if (hdev->discovery.state != DISCOVERY_STOPPED) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- hdev->discovery.type = cp->type;
-
- switch (hdev->discovery.type) {
- case DISCOV_TYPE_BREDR:
- if (lmp_bredr_capable(hdev))
- err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
- else
- err = -ENOTSUPP;
- break;
-
- case DISCOV_TYPE_LE:
- if (lmp_host_le_capable(hdev))
- err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
- LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
- else
- err = -ENOTSUPP;
- break;
-
- case DISCOV_TYPE_INTERLEAVED:
- if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
- err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
- LE_SCAN_WIN,
- LE_SCAN_TIMEOUT_BREDR_LE);
- else
- err = -ENOTSUPP;
- break;
-
- default:
- err = -EINVAL;
- }
-
- if (err < 0)
- mgmt_pending_remove(cmd);
- else
- hci_discovery_set_state(hdev, DISCOVERY_STARTING);
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_stop_discovery *mgmt_cp = data;
- struct pending_cmd *cmd;
- struct hci_cp_remote_name_req_cancel cp;
- struct inquiry_entry *e;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!hci_discovery_active(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
- MGMT_STATUS_REJECTED, &mgmt_cp->type,
- sizeof(mgmt_cp->type));
- goto unlock;
- }
-
- if (hdev->discovery.type != mgmt_cp->type) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
- sizeof(mgmt_cp->type));
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
-
- if (hdev->discovery.state == DISCOVERY_FINDING) {
- err = hci_cancel_inquiry(hdev);
- if (err < 0)
- mgmt_pending_remove(cmd);
- else
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- goto unlock;
- }
-
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
- if (!e) {
- mgmt_pending_remove(cmd);
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
- }
-
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
- if (err < 0)
- mgmt_pending_remove(cmd);
- else
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_confirm_name *cp = data;
- struct inquiry_entry *e;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- if (!hci_discovery_active(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
- MGMT_STATUS_FAILED);
- goto failed;
- }
-
- e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
- if (!e) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
- MGMT_STATUS_INVALID_PARAMS);
- goto failed;
- }
-
- if (cp->name_known) {
- e->name_state = NAME_KNOWN;
- list_del(&e->list);
- } else {
- e->name_state = NAME_NEEDED;
- hci_inquiry_cache_update_resolve(hdev, e);
- }
-
- err = 0;
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_block_device *cp = data;
- u8 status;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
- if (err < 0)
- status = MGMT_STATUS_FAILED;
- else
- status = 0;
-
- err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
- &cp->addr, sizeof(cp->addr));
-
- hci_dev_unlock(hdev);
-
- return err;
-}
-
-static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct mgmt_cp_unblock_device *cp = data;
- u8 status;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- hci_dev_lock(hdev);
-
- err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
- if (err < 0)
- status = MGMT_STATUS_INVALID_PARAMS;
- else
- status = 0;
-
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
- &cp->addr, sizeof(cp->addr));
-
- hci_dev_unlock(hdev);
-
- return err;
-}
-
-static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
-{
- struct mgmt_mode *cp = data;
- struct hci_cp_write_page_scan_activity acp;
- u8 type;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- if (!hdev_is_powered(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_NOT_POWERED);
-
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_REJECTED);
-
- hci_dev_lock(hdev);
-
- if (cp->val) {
- type = PAGE_SCAN_TYPE_INTERLACED;
-
- /* 22.5 msec page scan interval */
- acp.interval = __constant_cpu_to_le16(0x0024);
- } else {
- type = PAGE_SCAN_TYPE_STANDARD; /* default */
-
- /* default 1.28 sec page scan */
- acp.interval = __constant_cpu_to_le16(0x0800);
- }
-
- /* default 11.25 msec page scan window */
- acp.window = __constant_cpu_to_le16(0x0012);
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
- &acp);
- if (err < 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
- goto done;
- }
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
- if (err < 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
- goto done;
- }
-
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
- NULL, 0);
-done:
- hci_dev_unlock(hdev);
- return err;
-}
-
-static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
- void *cp_data, u16 len)
-{
- struct mgmt_cp_load_long_term_keys *cp = cp_data;
- u16 key_count, expected_len;
- int i;
-
- key_count = get_unaligned_le16(&cp->key_count);
-
- expected_len = sizeof(*cp) + key_count *
- sizeof(struct mgmt_ltk_info);
- if (expected_len != len) {
- BT_ERR("load_keys: expected %u bytes, got %u bytes",
- len, expected_len);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
- EINVAL);
- }
-
- BT_DBG("%s key_count %u", hdev->name, key_count);
-
- hci_dev_lock(hdev);
-
- hci_smp_ltks_clear(hdev);
-
- for (i = 0; i < key_count; i++) {
- struct mgmt_ltk_info *key = &cp->keys[i];
- u8 type;
-
- if (key->master)
- type = HCI_SMP_LTK;
- else
- type = HCI_SMP_LTK_SLAVE;
-
- hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
- type, 0, key->authenticated, key->val,
- key->enc_size, key->ediv, key->rand);
- }
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-struct mgmt_handler {
- int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
- u16 data_len);
- bool var_len;
- size_t data_len;
-} mgmt_handlers[] = {
- { NULL }, /* 0x0000 (no command) */
- { read_version, false, MGMT_READ_VERSION_SIZE },
- { read_commands, false, MGMT_READ_COMMANDS_SIZE },
- { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
- { read_controller_info, false, MGMT_READ_INFO_SIZE },
- { set_powered, false, MGMT_SETTING_SIZE },
- { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
- { set_connectable, false, MGMT_SETTING_SIZE },
- { set_fast_connectable, false, MGMT_SETTING_SIZE },
- { set_pairable, false, MGMT_SETTING_SIZE },
- { set_link_security, false, MGMT_SETTING_SIZE },
- { set_ssp, false, MGMT_SETTING_SIZE },
- { set_hs, false, MGMT_SETTING_SIZE },
- { set_le, false, MGMT_SETTING_SIZE },
- { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
- { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
- { add_uuid, false, MGMT_ADD_UUID_SIZE },
- { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
- { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
- { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
- { disconnect, false, MGMT_DISCONNECT_SIZE },
- { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
- { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
- { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
- { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
- { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
- { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
- { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
- { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
- { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
- { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
- { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
- { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
- { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
- { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
- { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
- { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
- { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
- { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
- { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
-};
-
-
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
- void *buf;
- u8 *cp;
+ unsigned char *buf;
struct mgmt_hdr *hdr;
u16 opcode, index, len;
- struct hci_dev *hdev = NULL;
- struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2674,7 +2347,7 @@
goto done;
}
- hdr = buf;
+ hdr = (struct mgmt_hdr *) buf;
opcode = get_unaligned_le16(&hdr->opcode);
index = get_unaligned_le16(&hdr->index);
len = get_unaligned_le16(&hdr->len);
@@ -2684,54 +2357,122 @@
goto done;
}
- if (index != MGMT_INDEX_NONE) {
- hdev = hci_dev_get(index);
- if (!hdev) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
- }
- }
+ BT_DBG("got opcode %x", opcode);
+ switch (opcode) {
+ case MGMT_OP_READ_VERSION:
+ err = read_version(sk);
+ break;
+ case MGMT_OP_READ_INDEX_LIST:
+ err = read_index_list(sk);
+ break;
+ case MGMT_OP_READ_INFO:
+ err = read_controller_info(sk, index);
+ break;
+ case MGMT_OP_SET_POWERED:
+ err = set_powered(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_DISCOVERABLE:
+ err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_LIMIT_DISCOVERABLE:
+ err = set_limited_discoverable(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
+ case MGMT_OP_SET_CONNECTABLE:
+ err = set_connectable(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_PAIRABLE:
+ err = set_pairable(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_ADD_UUID:
+ err = add_uuid(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_UUID:
+ err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_DEV_CLASS:
+ err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_SERVICE_CACHE:
+ err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_LOAD_KEYS:
+ err = load_keys(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_KEY:
+ err = remove_key(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_DISCONNECT:
+ err = disconnect(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_GET_CONNECTIONS:
+ err = get_connections(sk, index);
+ break;
+ case MGMT_OP_PIN_CODE_REPLY:
+ err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_PIN_CODE_NEG_REPLY:
+ err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_IO_CAPABILITY:
+ err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_PAIR_DEVICE:
+ err = pair_device(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_USER_CONFIRM_REPLY:
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ err = user_confirm_reply(sk, index, buf + sizeof(*hdr),
+ len, opcode);
+ break;
+ case MGMT_OP_SET_LOCAL_NAME:
+ err = set_local_name(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_START_DISCOVERY:
+ err = start_discovery(sk, index);
+ break;
+ case MGMT_OP_STOP_DISCOVERY:
+ err = stop_discovery(sk, index);
+ break;
+ case MGMT_OP_RESOLVE_NAME:
+ err = resolve_name(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_CONNECTION_PARAMS:
+ err = set_connection_params(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_RSSI_REPORTER:
+ err = set_rssi_reporter(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_UNSET_RSSI_REPORTER:
+ err = unset_rssi_reporter(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_READ_LOCAL_OOB_DATA:
+ err = read_local_oob_data(sk, index);
+ break;
+ case MGMT_OP_ADD_REMOTE_OOB_DATA:
+ err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
+ err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
+ case MGMT_OP_ENCRYPT_LINK:
+ err = encrypt_link(sk, index, buf + sizeof(*hdr), len);
+ break;
- if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
- mgmt_handlers[opcode].func == NULL) {
+ default:
BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_UNKNOWN_COMMAND);
- goto done;
+ err = cmd_status(sk, index, opcode, 0x01);
+ break;
}
- if ((hdev && opcode < MGMT_OP_READ_INFO) ||
- (!hdev && opcode >= MGMT_OP_READ_INFO)) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
- }
-
- handler = &mgmt_handlers[opcode];
-
- if ((handler->var_len && len < handler->data_len) ||
- (!handler->var_len && len != handler->data_len)) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_PARAMS);
- goto done;
- }
-
- if (hdev)
- mgmt_init_hdev(sk, hdev);
-
- cp = buf + sizeof(*hdr);
-
- err = handler->func(sk, hdev, cp, len);
if (err < 0)
goto done;
err = msglen;
done:
- if (hdev)
- hci_dev_put(hdev);
-
kfree(buf);
return err;
}
@@ -2744,31 +2485,37 @@
mgmt_pending_remove(cmd);
}
-int mgmt_index_added(struct hci_dev *hdev)
+int mgmt_index_added(u16 index)
{
- return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+ BT_DBG("%d", index);
+ return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
}
-int mgmt_index_removed(struct hci_dev *hdev)
+int mgmt_index_removed(u16 index)
{
- u8 status = MGMT_STATUS_INVALID_INDEX;
+ u8 status = ENODEV;
- mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+ BT_DBG("%d", index);
- return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
+ mgmt_pending_foreach(0, index, cmd_status_rsp, &status);
+
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
}
struct cmd_lookup {
+ u8 val;
struct sock *sk;
- struct hci_dev *hdev;
- u8 mgmt_status;
};
-static void settings_rsp(struct pending_cmd *cmd, void *data)
+static void mode_rsp(struct pending_cmd *cmd, void *data)
{
+ struct mgmt_mode *cp = cmd->param;
struct cmd_lookup *match = data;
- send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
+ if (cp->val != match->val)
+ return;
+
+ send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
list_del(&cmd->list);
@@ -2780,174 +2527,104 @@
mgmt_pending_free(cmd);
}
-int mgmt_powered(struct hci_dev *hdev, u8 powered)
+int mgmt_powered(u16 index, u8 powered)
{
- struct cmd_lookup match = { NULL, hdev };
- int err;
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { powered, NULL };
+ int ret;
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
- return 0;
+ BT_DBG("hci%u %d", index, powered);
- mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
- if (powered) {
- u8 scan = 0;
-
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- scan |= SCAN_PAGE;
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- scan |= SCAN_INQUIRY;
-
- if (scan)
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-
- update_class(hdev);
- update_name(hdev, hdev->dev_name);
- update_eir(hdev);
- } else {
- u8 status = MGMT_STATUS_NOT_POWERED;
- mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+ if (!powered) {
+ u8 status = ENETDOWN;
+ mgmt_pending_foreach(0, index, cmd_status_rsp, &status);
}
- err = new_settings(hdev, match.sk);
+ ev.val = powered;
+
+ ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
if (match.sk)
sock_put(match.sk);
- return err;
+ return ret;
}
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+int mgmt_discoverable(u16 index, u8 discoverable)
{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { discoverable, NULL };
+ int ret;
- if (discoverable) {
- if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- changed = true;
- }
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
- &match);
+ ev.val = discoverable;
- if (changed)
- err = new_settings(hdev, match.sk);
+ ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+ match.sk);
if (match.sk)
sock_put(match.sk);
- return err;
+ return ret;
}
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+int mgmt_connectable(u16 index, u8 connectable)
{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { connectable, NULL };
+ int ret;
- if (connectable) {
- if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- changed = true;
- }
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
- mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
- &match);
+ ev.val = connectable;
- if (changed)
- err = new_settings(hdev, match.sk);
+ ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
if (match.sk)
sock_put(match.sk);
+ return ret;
+}
+
+int mgmt_new_key(u16 index, struct link_key *key, u8 bonded)
+{
+ struct mgmt_ev_new_key *ev;
+ int err, total;
+
+ total = sizeof(struct mgmt_ev_new_key) + key->dlen;
+ ev = kzalloc(total, GFP_ATOMIC);
+ if (!ev)
+ return -ENOMEM;
+
+ bacpy(&ev->key.bdaddr, &key->bdaddr);
+ ev->key.addr_type = key->addr_type;
+ ev->key.key_type = key->key_type;
+ memcpy(ev->key.val, key->val, 16);
+ ev->key.pin_len = key->pin_len;
+ ev->key.auth = key->auth;
+ ev->store_hint = bonded;
+ ev->key.dlen = key->dlen;
+
+ memcpy(ev->key.data, key->data, key->dlen);
+
+ err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
+
+ kfree(ev);
+
return err;
}
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le)
{
- u8 mgmt_err = mgmt_status(status);
+ struct mgmt_ev_connected ev;
- if (scan & SCAN_PAGE)
- mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
- cmd_status_rsp, &mgmt_err);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.le = le;
- if (scan & SCAN_INQUIRY)
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
- cmd_status_rsp, &mgmt_err);
-
- return 0;
-}
-
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
-{
- struct mgmt_ev_new_link_key ev;
-
- memset(&ev, 0, sizeof(ev));
-
- ev.store_hint = persistent;
- bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = MGMT_ADDR_BREDR;
- ev.key.type = key->type;
- memcpy(ev.key.val, key->val, 16);
- ev.key.pin_len = key->pin_len;
-
- return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
-}
-
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
-{
- struct mgmt_ev_new_long_term_key ev;
-
- memset(&ev, 0, sizeof(ev));
-
- ev.store_hint = persistent;
- bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = key->bdaddr_type;
- ev.key.authenticated = key->authenticated;
- ev.key.enc_size = key->enc_size;
- ev.key.ediv = key->ediv;
-
- if (key->type == HCI_SMP_LTK)
- ev.key.master = 1;
-
- memcpy(ev.key.rand, key->rand, sizeof(key->rand));
- memcpy(ev.key.val, key->val, sizeof(key->val));
-
- return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
- NULL);
-}
-
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u32 flags, u8 *name, u8 name_len,
- u8 *dev_class)
-{
- char buf[512];
- struct mgmt_ev_device_connected *ev = (void *) buf;
- u16 eir_len = 0;
-
- bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
-
- ev->flags = __cpu_to_le32(flags);
-
- if (name_len > 0)
- eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
- name, name_len);
-
- if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
- eir_len = eir_append_data(ev->eir, eir_len,
- EIR_CLASS_OF_DEV, dev_class, 3);
-
- put_unaligned_le16(eir_len, &ev->eir_len);
-
- return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
- sizeof(*ev) + eir_len, NULL);
+ return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2956,11 +2633,9 @@
struct sock **sk = data;
struct mgmt_rp_disconnect rp;
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
+ bacpy(&rp.bdaddr, &cp->bdaddr);
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
- sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
*sk = cmd->sk;
sock_hold(*sk);
@@ -2968,402 +2643,243 @@
mgmt_pending_remove(cmd);
}
-static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
{
- struct hci_dev *hdev = data;
- struct mgmt_cp_unpair_device *cp = cmd->param;
- struct mgmt_rp_unpair_device rp;
-
- memset(&rp, 0, sizeof(rp));
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
-
- device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
-
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
-
- mgmt_pending_remove(cmd);
-}
-
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type)
-{
- struct mgmt_addr_info ev;
+ struct mgmt_ev_disconnected ev;
struct sock *sk = NULL;
int err;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
- ev.type = link_to_mgmt(link_type, addr_type);
- err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
- sk);
+ err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
if (sk)
- sock_put(sk);
-
- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
+ sock_put(sk);
return err;
}
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status)
+int mgmt_disconnect_failed(u16 index)
{
- struct mgmt_rp_disconnect rp;
struct pending_cmd *cmd;
int err;
- cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
if (!cmd)
return -ENOENT;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
-
- err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
- mgmt_status(status), &rp, sizeof(rp));
+ err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
mgmt_pending_remove(cmd);
- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
return err;
}
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
struct mgmt_ev_connect_failed ev;
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
- ev.status = mgmt_status(status);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
- return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
{
struct mgmt_ev_pin_code_request ev;
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = MGMT_ADDR_BREDR;
- ev.secure = secure;
+ BT_DBG("hci%u", index);
- return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.secure = 0;
+
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+ NULL);
}
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
if (!cmd)
return -ENOENT;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- mgmt_status(status), &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+ sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
if (!cmd)
return -ENOENT;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
- mgmt_status(status), &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+ sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, __le32 value,
- u8 confirm_hint)
+int mgmt_user_confirm_request(u16 index, u8 event,
+ bdaddr_t *bdaddr, __le32 value)
{
struct mgmt_ev_user_confirm_request ev;
+ struct hci_conn *conn = NULL;
+ struct hci_dev *hdev;
+ u8 loc_cap, rem_cap, loc_mitm, rem_mitm;
- BT_DBG("%s", hdev->name);
+ BT_DBG("hci%u", index);
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
- ev.confirm_hint = confirm_hint;
+ hdev = hci_dev_get(index);
+
+ if (!hdev)
+ return -ENODEV;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+
+ ev.auto_confirm = 0;
+
+ if (!conn || event != HCI_EV_USER_CONFIRM_REQUEST)
+ goto no_auto_confirm;
+
+ loc_cap = (conn->io_capability == 0x04) ? 0x01 : conn->io_capability;
+ rem_cap = conn->remote_cap;
+ loc_mitm = conn->auth_type & 0x01;
+ rem_mitm = conn->remote_auth & 0x01;
+
+ if ((conn->auth_type & HCI_AT_DEDICATED_BONDING) &&
+ conn->auth_initiator && rem_cap == 0x03)
+ ev.auto_confirm = 1;
+ else if (loc_cap == 0x01 && (rem_cap == 0x00 || rem_cap == 0x03)) {
+ if (!loc_mitm && !rem_mitm)
+ value = 0;
+ goto no_auto_confirm;
+ }
+
+
+ if ((!loc_mitm || rem_cap == 0x03) && (!rem_mitm || loc_cap == 0x03))
+ ev.auto_confirm = 1;
+
+no_auto_confirm:
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.event = event;
put_unaligned_le32(value, &ev.value);
- return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ hci_dev_put(hdev);
+
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+ NULL);
}
-int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type)
+int mgmt_user_passkey_request(u16 index, bdaddr_t *bdaddr)
{
struct mgmt_ev_user_passkey_request ev;
- BT_DBG("%s", hdev->name);
+ BT_DBG("hci%u", index);
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, index, &ev, sizeof(ev),
+ NULL);
}
-static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status,
- u8 opcode)
+static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
+ u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
int err;
- cmd = mgmt_pending_find(opcode, hdev);
+ cmd = mgmt_pending_find(opcode, index);
if (!cmd)
return -ENOENT;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
- err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
- &rp, sizeof(rp));
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
+ err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status)
+int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
- return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_CONFIRM_REPLY);
+ return confirm_reply_complete(index, bdaddr, status,
+ MGMT_OP_USER_CONFIRM_REPLY);
}
-int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status)
+int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
- return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
+ return confirm_reply_complete(index, bdaddr, status,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
-int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status)
-{
- return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_PASSKEY_REPLY);
-}
-
-int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status)
-{
- return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
-}
-
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
struct mgmt_ev_auth_failed ev;
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
- ev.status = mgmt_status(status);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
- return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
}
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
-{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
- mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
- cmd_status_rsp, &mgmt_err);
- return 0;
- }
-
- if (test_bit(HCI_AUTH, &hdev->flags)) {
- if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
- changed = true;
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
- &match);
-
- if (changed)
- err = new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- return err;
-}
-
-static int clear_eir(struct hci_dev *hdev)
-{
- struct hci_cp_write_eir cp;
-
- if (!(hdev->features[6] & LMP_EXT_INQ))
- return 0;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
-
- memset(&cp, 0, sizeof(cp));
-
- return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
-}
-
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
-
- if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
- &hdev->dev_flags))
- err = new_settings(hdev, NULL);
-
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
- &mgmt_err);
-
- return err;
- }
-
- if (enable) {
- if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- changed = true;
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
-
- if (changed)
- err = new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- update_eir(hdev);
- else
- clear_eir(hdev);
-
- return err;
-}
-
-static void class_rsp(struct pending_cmd *cmd, void *data)
-{
- struct cmd_lookup *match = data;
-
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
- match->hdev->dev_class, 3);
-
- list_del(&cmd->list);
-
- if (match->sk == NULL) {
- match->sk = cmd->sk;
- sock_hold(match->sk);
- }
-
- mgmt_pending_free(cmd);
-}
-
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
- u8 status)
-{
- struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
- int err = 0;
-
- clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
-
- mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
- mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
- mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
-
- if (!status)
- err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
- 3, NULL);
-
- if (match.sk)
- sock_put(match.sk);
-
- return err;
-}
-
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
{
struct pending_cmd *cmd;
+ struct hci_dev *hdev;
struct mgmt_cp_set_local_name ev;
- bool changed = false;
- int err = 0;
-
- if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
- memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
- changed = true;
- }
+ int err;
memset(&ev, 0, sizeof(ev));
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
if (!cmd)
goto send_event;
- /* Always assume that either the short or the complete name has
- * changed if there was a pending mgmt command */
- changed = true;
-
if (status) {
- err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
- mgmt_status(status));
+ err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
goto failed;
}
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
- sizeof(ev));
+ hdev = hci_dev_get(index);
+ if (hdev) {
+ update_eir(hdev);
+ hci_dev_put(hdev);
+ }
+
+ err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+ sizeof(ev));
if (err < 0)
goto failed;
send_event:
- if (changed)
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
- sizeof(ev), cmd ? cmd->sk : NULL);
-
- update_eir(hdev);
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
failed:
if (cmd)
@@ -3371,30 +2887,29 @@
return err;
}
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
- u8 *randomizer, u8 status)
+int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
+ u8 status)
{
struct pending_cmd *cmd;
int err;
- BT_DBG("%s status %u", hdev->name, status);
+ BT_DBG("hci%u status %u", index, status);
- cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
+ cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
if (!cmd)
return -ENOENT;
if (status) {
- err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- mgmt_status(status));
+ err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ EIO);
} else {
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash, sizeof(rp.hash));
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
- err = cmd_complete(cmd->sk, hdev->id,
- MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
- sizeof(rp));
+ err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ &rp, sizeof(rp));
}
mgmt_pending_remove(cmd);
@@ -3402,198 +2917,182 @@
return err;
}
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+void mgmt_read_rssi_complete(u16 index, s8 rssi, bdaddr_t *bdaddr,
+ u16 handle, u8 status)
{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
+ struct mgmt_ev_rssi_update ev;
+ struct hci_conn *conn;
+ struct hci_dev *hdev;
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (status)
+ return;
- if (enable && test_and_clear_bit(HCI_LE_ENABLED,
- &hdev->dev_flags))
- err = new_settings(hdev, NULL);
+ hdev = hci_dev_get(index);
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
- cmd_status_rsp, &mgmt_err);
+ if (!conn)
+ return;
- return err;
- }
+ BT_DBG("rssi_update_thresh_exceed : %d ",
+ conn->rssi_update_thresh_exceed);
+ BT_DBG("RSSI Threshold : %d , recvd RSSI : %d ",
+ conn->rssi_threshold, rssi);
- if (enable) {
- if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- changed = true;
+ if (conn->rssi_update_thresh_exceed == 1) {
+ BT_DBG("rssi_update_thresh_exceed == 1");
+ if (rssi > conn->rssi_threshold) {
+ memset(&ev, 0, sizeof(ev));
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.rssi = rssi;
+ mgmt_event(MGMT_EV_RSSI_UPDATE, index, &ev,
+ sizeof(ev), NULL);
+ } else {
+ hci_conn_set_rssi_reporter(conn, conn->rssi_threshold,
+ conn->rssi_update_interval,
+ conn->rssi_update_thresh_exceed);
+ }
} else {
- if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- changed = true;
+ BT_DBG("rssi_update_thresh_exceed == 0");
+ if (rssi < conn->rssi_threshold) {
+ memset(&ev, 0, sizeof(ev));
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.rssi = rssi;
+ mgmt_event(MGMT_EV_RSSI_UPDATE, index, &ev,
+ sizeof(ev), NULL);
+ } else {
+ hci_conn_set_rssi_reporter(conn, conn->rssi_threshold,
+ conn->rssi_update_interval,
+ conn->rssi_update_thresh_exceed);
+ }
}
-
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
-
- if (changed)
- err = new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- return err;
}
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
- ssp, u8 *eir, u16 eir_len)
+
+int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 type, u8 le,
+ u8 *dev_class, s8 rssi, u8 eir_len, u8 *eir)
{
- char buf[512];
- struct mgmt_ev_device_found *ev = (void *) buf;
- size_t ev_size;
-
- /* Leave 5 bytes for a potential CoD field */
- if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
- return -EINVAL;
-
- memset(buf, 0, sizeof(buf));
-
- bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
- ev->rssi = rssi;
- if (cfm_name)
- ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
- if (!ssp)
- ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;
-
- if (eir_len > 0)
- memcpy(ev->eir, eir, eir_len);
-
- if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
- eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
- dev_class, 3);
-
- put_unaligned_le16(eir_len, &ev->eir_len);
-
- ev_size = sizeof(*ev) + eir_len;
-
- return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
-}
-
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, s8 rssi, u8 *name, u8 name_len)
-{
- struct mgmt_ev_device_found *ev;
- char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
- u16 eir_len;
-
- ev = (struct mgmt_ev_device_found *) buf;
-
- memset(buf, 0, sizeof(buf));
-
- bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
- ev->rssi = rssi;
-
- eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
- name_len);
-
- put_unaligned_le16(eir_len, &ev->eir_len);
-
- return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
- sizeof(*ev) + eir_len, NULL);
-}
-
-int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
-{
- struct pending_cmd *cmd;
- u8 type;
+ struct mgmt_ev_device_found ev;
+ struct hci_dev *hdev;
int err;
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-
- cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
- if (!cmd)
- return -ENOENT;
-
- type = hdev->discovery.type;
-
- err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
- &type, sizeof(type));
- mgmt_pending_remove(cmd);
-
- return err;
-}
-
-int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
-{
- struct pending_cmd *cmd;
- int err;
-
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
- if (!cmd)
- return -ENOENT;
-
- err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
- &hdev->discovery.type, sizeof(hdev->discovery.type));
- mgmt_pending_remove(cmd);
-
- return err;
-}
-
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
-{
- struct mgmt_ev_discovering ev;
- struct pending_cmd *cmd;
-
- BT_DBG("%s discovering %u", hdev->name, discovering);
-
- if (discovering)
- cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
- else
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
-
- if (cmd != NULL) {
- u8 type = hdev->discovery.type;
-
- cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
- sizeof(type));
- mgmt_pending_remove(cmd);
- }
+ BT_DBG("le: %d", le);
memset(&ev, 0, sizeof(ev));
- ev.type = hdev->discovery.type;
- ev.discovering = discovering;
- return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.rssi = rssi;
+ ev.type = type;
+ ev.le = le;
+
+ if (dev_class)
+ memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
+
+ if (eir && eir_len)
+ memcpy(ev.eir, eir, eir_len);
+
+ err = mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+
+ if (err < 0)
+ return err;
+
+ hdev = hci_dev_get(index);
+
+ if (!hdev)
+ return 0;
+
+ if (hdev->disco_state == SCAN_IDLE)
+ goto done;
+
+ hdev->disco_int_count++;
+
+ if (hdev->disco_int_count >= hdev->disco_int_phase) {
+ /* Inquiry scan for General Discovery LAP */
+ struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 4, 0};
+ struct hci_cp_le_set_scan_enable le_cp = {0, 0};
+
+ hdev->disco_int_phase *= 2;
+ hdev->disco_int_count = 0;
+ if (hdev->disco_state == SCAN_LE) {
+ /* cancel LE scan */
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(le_cp), &le_cp);
+ /* start BR scan */
+ cp.num_rsp = (u8) hdev->disco_int_phase;
+ hci_send_cmd(hdev, HCI_OP_INQUIRY,
+ sizeof(cp), &cp);
+ hdev->disco_state = SCAN_BR;
+ del_timer_sync(&hdev->disco_le_timer);
+ }
+ }
+
+done:
+ hci_dev_put(hdev);
+ return 0;
}
-int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+
+int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 status, u8 *name)
{
- struct pending_cmd *cmd;
- struct mgmt_ev_device_blocked ev;
+ struct mgmt_ev_remote_name ev;
- cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
+ memset(&ev, 0, sizeof(ev));
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = type;
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
+ memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
}
-int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+int mgmt_encrypt_change(u16 index, bdaddr_t *bdaddr, u8 status)
{
- struct pending_cmd *cmd;
- struct mgmt_ev_device_unblocked ev;
+ struct mgmt_ev_encrypt_change ev;
- cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
+ BT_DBG("hci%u", index);
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = type;
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
- return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_ENCRYPT_CHANGE, index, &ev, sizeof(ev),
+ NULL);
}
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
+int mgmt_remote_class(u16 index, bdaddr_t *bdaddr, u8 dev_class[3])
+{
+ struct mgmt_ev_remote_class ev;
-module_param(enable_le, bool, 0644);
-MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
+ memset(&ev, 0, sizeof(ev));
+
+ bacpy(&ev.bdaddr, bdaddr);
+ memcpy(ev.dev_class, dev_class, 3);
+
+ return mgmt_event(MGMT_EV_REMOTE_CLASS, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_remote_version(u16 index, bdaddr_t *bdaddr, u8 ver, u16 mnf,
+ u16 sub_ver)
+{
+ struct mgmt_ev_remote_version ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.lmp_ver = ver;
+ ev.manufacturer = mnf;
+ ev.lmp_subver = sub_ver;
+
+ return mgmt_event(MGMT_EV_REMOTE_VERSION, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_remote_features(u16 index, bdaddr_t *bdaddr, u8 features[8])
+{
+ struct mgmt_ev_remote_features ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ bacpy(&ev.bdaddr, bdaddr);
+ memcpy(ev.features, features, sizeof(ev.features));
+
+ return mgmt_event(MGMT_EV_REMOTE_FEATURES, index, &ev, sizeof(ev),
+ NULL);
+}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 9652707..1390eca 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#include <net/sock.h>
#include <linux/uaccess.h>
@@ -50,6 +51,9 @@
#include <net/bluetooth/rfcomm.h>
#define VERSION "1.11"
+/* 1 Byte DLCI, 1 Byte Control filed, 2 Bytes Length, 1 Byte for Credits,
+ * 1 Byte FCS */
+#define RFCOMM_HDR_SIZE 6
static bool disable_cfc;
static bool l2cap_ertm;
@@ -62,6 +66,7 @@
#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
+static unsigned long rfcomm_event;
static LIST_HEAD(session_list);
@@ -81,7 +86,9 @@
static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
bdaddr_t *dst,
u8 sec_level,
- int *err);
+ int *err,
+ u8 channel,
+ struct rfcomm_dlc *d);
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
static void rfcomm_session_del(struct rfcomm_session *s);
@@ -115,10 +122,17 @@
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
+struct rfcomm_sock_release_work {
+ struct work_struct work;
+ struct socket *sock;
+ int state;
+};
+
static inline void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
+ set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
wake_up_process(rfcomm_thread);
}
@@ -230,8 +244,6 @@
static inline int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
-
__u8 auth_type;
switch (d->sec_level) {
@@ -246,7 +258,8 @@
break;
}
- return hci_conn_security(conn->hcon, d->sec_level, auth_type);
+ return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
+ auth_type);
}
static void rfcomm_session_timeout(unsigned long arg)
@@ -377,11 +390,13 @@
static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_dlc *d;
+ struct list_head *p;
- list_for_each_entry(d, &s->dlcs, list)
+ list_for_each(p, &s->dlcs) {
+ d = list_entry(p, struct rfcomm_dlc, list);
if (d->dlci == dlci)
return d;
-
+ }
return NULL;
}
@@ -402,31 +417,31 @@
s = rfcomm_session_get(src, dst);
if (!s) {
- s = rfcomm_session_create(src, dst, d->sec_level, &err);
+ s = rfcomm_session_create(src, dst,
+ d->sec_level, &err, channel, d);
if (!s)
return err;
+ } else {
+ dlci = __dlci(!s->initiator, channel);
+
+ /* Check if DLCI already exists */
+ if (rfcomm_dlc_get(s, dlci))
+ return -EBUSY;
+
+ rfcomm_dlc_clear_state(d);
+
+ d->dlci = dlci;
+ d->addr = __addr(s->initiator, dlci);
+ d->priority = 7;
+
+ d->state = BT_CONFIG;
+ rfcomm_dlc_link(s, d);
+
+ d->out = 1;
+
+ d->mtu = s->mtu;
+ d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
}
-
- dlci = __dlci(!s->initiator, channel);
-
- /* Check if DLCI already exists */
- if (rfcomm_dlc_get(s, dlci))
- return -EBUSY;
-
- rfcomm_dlc_clear_state(d);
-
- d->dlci = dlci;
- d->addr = __addr(s->initiator, dlci);
- d->priority = 7;
-
- d->state = BT_CONFIG;
- rfcomm_dlc_link(s, d);
-
- d->out = 1;
-
- d->mtu = s->mtu;
- d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
-
if (s->state == BT_CONNECTED) {
if (rfcomm_check_security(d))
rfcomm_send_pn(s, 1, d);
@@ -622,9 +637,25 @@
return s;
}
+static void rfcomm_sock_release_worker(struct work_struct *work)
+{
+ struct rfcomm_sock_release_work *release_work =
+ container_of(work, struct rfcomm_sock_release_work, work);
+
+ BT_DBG("sock %p", release_work->sock);
+
+ sock_release(release_work->sock);
+ if (release_work->state != BT_LISTEN)
+ module_put(THIS_MODULE);
+
+ kfree(release_work);
+}
+
static void rfcomm_session_del(struct rfcomm_session *s)
{
int state = s->state;
+ struct socket *sock = s->sock;
+ struct rfcomm_sock_release_work *release_work;
BT_DBG("session %p state %ld", s, s->state);
@@ -634,11 +665,19 @@
rfcomm_send_disc(s, 0);
rfcomm_session_clear_timer(s);
- sock_release(s->sock);
+
kfree(s);
- if (state != BT_LISTEN)
- module_put(THIS_MODULE);
+ release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC);
+ if (release_work) {
+ INIT_WORK(&release_work->work, rfcomm_sock_release_worker);
+ release_work->sock = sock;
+ release_work->state = state;
+
+ if (!schedule_work(&release_work->work))
+ kfree(release_work);
+ }
+
}
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
@@ -682,12 +721,15 @@
static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
bdaddr_t *dst,
u8 sec_level,
- int *err)
+ int *err,
+ u8 channel,
+ struct rfcomm_dlc *d)
{
struct rfcomm_session *s = NULL;
struct sockaddr_l2 addr;
struct socket *sock;
struct sock *sk;
+ u8 dlci;
BT_DBG("%s %s", batostr(src), batostr(dst));
@@ -706,10 +748,10 @@
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->chan->imtu = l2cap_mtu;
- l2cap_pi(sk)->chan->sec_level = sec_level;
+ l2cap_pi(sk)->imtu = l2cap_mtu;
+ l2cap_pi(sk)->sec_level = sec_level;
if (l2cap_ertm)
- l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
+ l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
release_sock(sk);
s = rfcomm_session_add(sock, BT_BOUND);
@@ -724,11 +766,30 @@
addr.l2_family = AF_BLUETOOTH;
addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
addr.l2_cid = 0;
+ dlci = __dlci(!s->initiator, channel);
+
+ /* Check if DLCI already exists */
+ if (rfcomm_dlc_get(s, dlci))
+ return NULL;
+
+ rfcomm_dlc_clear_state(d);
+
+ d->dlci = dlci;
+ d->addr = __addr(s->initiator, dlci);
+ d->priority = 7;
+
+ d->state = BT_CONFIG;
+ rfcomm_dlc_link(s, d);
+
+ d->out = 1;
+
+ d->mtu = s->mtu;
+ d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
if (*err == 0 || *err == -EINPROGRESS)
return s;
-
- rfcomm_session_del(s);
+ BT_ERR("error ret is %d, going to delete session", *err);
+ rfcomm_dlc_unlink(d);
return NULL;
failed:
@@ -748,6 +809,7 @@
/* ---- RFCOMM frame sending ---- */
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
+ struct socket *sock = s->sock;
struct kvec iv = { data, len };
struct msghdr msg;
@@ -755,14 +817,7 @@
memset(&msg, 0, sizeof(msg));
- return kernel_sendmsg(s->sock, &msg, &iv, 1, len);
-}
-
-static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
-{
- BT_DBG("%p cmd %u", s, cmd->ctrl);
-
- return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd));
+ return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
@@ -776,7 +831,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_cmd(s, &cmd);
+ return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
}
static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -790,7 +845,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_cmd(s, &cmd);
+ return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
}
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -804,7 +859,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_cmd(s, &cmd);
+ return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
}
static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -840,7 +895,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_cmd(s, &cmd);
+ return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
}
static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -1163,18 +1218,12 @@
break;
case BT_DISCONN:
- /* rfcomm_session_put is called later so don't do
- * anything here otherwise we will mess up the session
- * reference counter:
- *
- * (a) when we are the initiator dlc_unlink will drive
- * the reference counter to 0 (there is no initial put
- * after session_add)
- *
- * (b) when we are not the initiator rfcomm_rx_process
- * will explicitly call put to balance the initial hold
- * done after session add.
- */
+ /* When socket is closed and we are not RFCOMM
+ * initiator rfcomm_process_rx already calls
+ * rfcomm_session_put() */
+ if (s->sock->sk->sk_state != BT_CLOSED)
+ if (list_empty(&s->dlcs))
+ rfcomm_session_put(s);
break;
}
}
@@ -1250,7 +1299,6 @@
void rfcomm_dlc_accept(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
BT_DBG("dlc %p", d);
@@ -1264,7 +1312,7 @@
rfcomm_dlc_unlock(d);
if (d->role_switch)
- hci_conn_switch_role(conn->hcon, 0x00);
+ hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
}
@@ -1812,11 +1860,6 @@
continue;
}
- if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
- continue;
- }
-
if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (d->out) {
@@ -1907,9 +1950,10 @@
rfcomm_session_hold(s);
/* We should adjust MTU on incoming sessions.
- * L2CAP MTU minus UIH header and FCS. */
- s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
- l2cap_pi(nsock->sk)->chan->imtu) - 5;
+ * L2CAP MTU minus UIH header and FCS.
+ * Need to accomodate 1 Byte credits information */
+ s->mtu = min(l2cap_pi(nsock->sk)->omtu,
+ l2cap_pi(nsock->sk)->imtu) - RFCOMM_HDR_SIZE;
rfcomm_schedule();
} else
@@ -1927,8 +1971,9 @@
s->state = BT_CONNECT;
/* We can adjust MTU on outgoing sessions.
- * L2CAP MTU minus UIH header and FCS. */
- s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
+ * L2CAP MTU minus UIH header, Credits and FCS. */
+ s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) -
+ RFCOMM_HDR_SIZE;
rfcomm_send_sabm(s, 0);
break;
@@ -2011,7 +2056,7 @@
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+ l2cap_pi(sk)->imtu = l2cap_mtu;
release_sock(sk);
/* Start listening on the socket */
@@ -2054,18 +2099,19 @@
rfcomm_add_listener(BDADDR_ANY);
- while (1) {
+ while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- break;
+ if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
+ /* No pending events. Let's sleep.
+ * Incoming connections and data will wake us up. */
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
/* Process stuff */
+ clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
rfcomm_process_sessions();
-
- schedule();
}
- __set_current_state(TASK_RUNNING);
rfcomm_kill_listener();
@@ -2092,7 +2138,7 @@
if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (status || encrypt == 0x00) {
- set_bit(RFCOMM_ENC_DROP, &d->flags);
+ __rfcomm_dlc_close(d, ECONNREFUSED);
continue;
}
}
@@ -2103,7 +2149,7 @@
rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
continue;
} else if (d->sec_level == BT_SECURITY_HIGH) {
- set_bit(RFCOMM_ENC_DROP, &d->flags);
+ __rfcomm_dlc_close(d, ECONNREFUSED);
continue;
}
}
@@ -2111,7 +2157,7 @@
if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
continue;
- if (!status && hci_conn_check_secure(conn, d->sec_level))
+ if (!status)
set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
else
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
@@ -2130,13 +2176,15 @@
static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
{
struct rfcomm_session *s;
+ struct list_head *pp, *p;
rfcomm_lock();
- list_for_each_entry(s, &session_list, list) {
- struct rfcomm_dlc *d;
- list_for_each_entry(d, &s->dlcs, list) {
+ list_for_each(p, &session_list) {
+ s = list_entry(p, struct rfcomm_session, list);
+ list_for_each(pp, &s->dlcs) {
struct sock *sk = s->sock->sk;
+ struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
seq_printf(f, "%s %s %ld %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index a55a43e..66cc1f0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -42,9 +42,9 @@
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/security.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
@@ -264,8 +264,6 @@
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
-
- security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
@@ -369,7 +367,7 @@
goto done;
}
- write_lock(&rfcomm_sk_list.lock);
+ write_lock_bh(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
@@ -380,7 +378,7 @@
sk->sk_state = BT_BOUND;
}
- write_unlock(&rfcomm_sk_list.lock);
+ write_unlock_bh(&rfcomm_sk_list.lock);
done:
release_sock(sk);
@@ -454,7 +452,7 @@
err = -EINVAL;
- write_lock(&rfcomm_sk_list.lock);
+ write_lock_bh(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
@@ -463,7 +461,7 @@
break;
}
- write_unlock(&rfcomm_sk_list.lock);
+ write_unlock_bh(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
@@ -487,6 +485,11 @@
lock_sock(sk);
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
@@ -498,20 +501,19 @@
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (1) {
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
- nsk = bt_accept_dequeue(sk, newsock);
- if (nsk)
- break;
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
- if (!timeo) {
- err = -EAGAIN;
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
@@ -519,12 +521,8 @@
err = sock_intr_errno(timeo);
break;
}
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -599,8 +597,6 @@
break;
}
- skb->priority = sk->sk_priority;
-
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
@@ -683,8 +679,7 @@
{
struct sock *sk = sock->sk;
struct bt_security sec;
- int err = 0;
- size_t len;
+ int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
@@ -746,8 +741,8 @@
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
+ struct sock *l2cap_sk;
struct rfcomm_conninfo cinfo;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
@@ -790,9 +785,10 @@
break;
}
- memset(&cinfo, 0, sizeof(cinfo));
- cinfo.hci_handle = conn->hcon->handle;
- memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
+ l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
+
+ cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
@@ -955,8 +951,6 @@
if (!sk)
goto done;
- bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
-
rfcomm_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, &src);
bacpy(&bt_sk(sk)->dst, &dst);
@@ -983,7 +977,7 @@
struct sock *sk;
struct hlist_node *node;
- read_lock(&rfcomm_sk_list.lock);
+ read_lock_bh(&rfcomm_sk_list.lock);
sk_for_each(sk, node, &rfcomm_sk_list.head) {
seq_printf(f, "%s %s %d %d\n",
@@ -992,7 +986,7 @@
sk->sk_state, rfcomm_pi(sk)->channel);
}
- read_unlock(&rfcomm_sk_list.lock);
+ read_unlock_bh(&rfcomm_sk_list.lock);
return 0;
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 4bf54b3..e18781c 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -34,7 +35,6 @@
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
-#include <linux/workqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -66,7 +66,7 @@
struct rfcomm_dlc *dlc;
struct tty_struct *tty;
wait_queue_head_t wait;
- struct work_struct wakeup_task;
+ struct tasklet_struct wakeup_task;
struct device *tty_dev;
@@ -76,13 +76,13 @@
};
static LIST_HEAD(rfcomm_dev_list);
-static DEFINE_SPINLOCK(rfcomm_dev_lock);
+static DEFINE_RWLOCK(rfcomm_dev_lock);
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(struct work_struct *work);
+static void rfcomm_tty_wakeup(unsigned long arg);
/* ---- Device functions ---- */
static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -134,10 +134,13 @@
static struct rfcomm_dev *__rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
+ struct list_head *p;
- list_for_each_entry(dev, &rfcomm_dev_list, list)
+ list_for_each(p, &rfcomm_dev_list) {
+ dev = list_entry(p, struct rfcomm_dev, list);
if (dev->id == id)
return dev;
+ }
return NULL;
}
@@ -146,7 +149,7 @@
{
struct rfcomm_dev *dev;
- spin_lock(&rfcomm_dev_lock);
+ read_lock(&rfcomm_dev_lock);
dev = __rfcomm_dev_get(id);
@@ -157,7 +160,7 @@
rfcomm_dev_hold(dev);
}
- spin_unlock(&rfcomm_dev_lock);
+ read_unlock(&rfcomm_dev_lock);
return dev;
}
@@ -195,8 +198,8 @@
static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
{
- struct rfcomm_dev *dev, *entry;
- struct list_head *head = &rfcomm_dev_list;
+ struct rfcomm_dev *dev;
+ struct list_head *head = &rfcomm_dev_list, *p;
int err = 0;
BT_DBG("id %d channel %d", req->dev_id, req->channel);
@@ -205,22 +208,24 @@
if (!dev)
return -ENOMEM;
- spin_lock(&rfcomm_dev_lock);
+ write_lock_bh(&rfcomm_dev_lock);
if (req->dev_id < 0) {
dev->id = 0;
- list_for_each_entry(entry, &rfcomm_dev_list, list) {
- if (entry->id != dev->id)
+ list_for_each(p, &rfcomm_dev_list) {
+ if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
break;
dev->id++;
- head = &entry->list;
+ head = p;
}
} else {
dev->id = req->dev_id;
- list_for_each_entry(entry, &rfcomm_dev_list, list) {
+ list_for_each(p, &rfcomm_dev_list) {
+ struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
+
if (entry->id == dev->id) {
err = -EADDRINUSE;
goto out;
@@ -229,7 +234,7 @@
if (entry->id > dev->id - 1)
break;
- head = &entry->list;
+ head = p;
}
}
@@ -253,7 +258,7 @@
atomic_set(&dev->opened, 0);
init_waitqueue_head(&dev->wait);
- INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
+ tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
skb_queue_head_init(&dev->pending);
@@ -290,7 +295,7 @@
__module_get(THIS_MODULE);
out:
- spin_unlock(&rfcomm_dev_lock);
+ write_unlock_bh(&rfcomm_dev_lock);
if (err < 0)
goto free;
@@ -327,9 +332,9 @@
if (atomic_read(&dev->opened) > 0)
return;
- spin_lock(&rfcomm_dev_lock);
+ write_lock_bh(&rfcomm_dev_lock);
list_del_init(&dev->list);
- spin_unlock(&rfcomm_dev_lock);
+ write_unlock_bh(&rfcomm_dev_lock);
rfcomm_dev_put(dev);
}
@@ -347,7 +352,7 @@
struct rfcomm_dev *dev = (void *) skb->sk;
atomic_sub(skb->truesize, &dev->wmem_alloc);
if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- queue_work(system_nrt_wq, &dev->wakeup_task);
+ tasklet_schedule(&dev->wakeup_task);
rfcomm_dev_put(dev);
}
@@ -451,9 +456,9 @@
static int rfcomm_get_dev_list(void __user *arg)
{
- struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
+ struct list_head *p;
int n = 0, size, err;
u16 dev_num;
@@ -473,9 +478,10 @@
di = dl->dev_info;
- spin_lock(&rfcomm_dev_lock);
+ read_lock_bh(&rfcomm_dev_lock);
- list_for_each_entry(dev, &rfcomm_dev_list, list) {
+ list_for_each(p, &rfcomm_dev_list) {
+ struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
continue;
(di + n)->id = dev->id;
@@ -488,7 +494,7 @@
break;
}
- spin_unlock(&rfcomm_dev_lock);
+ read_unlock_bh(&rfcomm_dev_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*di);
@@ -630,10 +636,9 @@
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(struct work_struct *work)
+static void rfcomm_tty_wakeup(unsigned long arg)
{
- struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
- wakeup_task);
+ struct rfcomm_dev *dev = (void *) arg;
struct tty_struct *tty = dev->tty;
if (!tty)
return;
@@ -758,7 +763,7 @@
rfcomm_dlc_close(dev->dlc, 0);
clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- cancel_work_sync(&dev->wakeup_task);
+ tasklet_kill(&dev->wakeup_task);
rfcomm_dlc_lock(dev->dlc);
tty->driver_data = NULL;
@@ -766,9 +771,9 @@
rfcomm_dlc_unlock(dev->dlc);
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
- spin_lock(&rfcomm_dev_lock);
+ write_lock_bh(&rfcomm_dev_lock);
list_del_init(&dev->list);
- spin_unlock(&rfcomm_dev_lock);
+ write_unlock_bh(&rfcomm_dev_lock);
rfcomm_dev_put(dev);
}
@@ -1151,12 +1156,11 @@
int __init rfcomm_init_ttys(void)
{
- int error;
-
rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
if (!rfcomm_tty_driver)
- return -ENOMEM;
+ return -1;
+ rfcomm_tty_driver->owner = THIS_MODULE;
rfcomm_tty_driver->driver_name = "rfcomm";
rfcomm_tty_driver->name = "rfcomm";
rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR;
@@ -1169,11 +1173,10 @@
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
- error = tty_register_driver(rfcomm_tty_driver);
- if (error) {
+ if (tty_register_driver(rfcomm_tty_driver)) {
BT_ERR("Can't register RFCOMM TTY driver");
put_tty_driver(rfcomm_tty_driver);
- return error;
+ return -1;
}
BT_INFO("RFCOMM TTY layer initialized");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 458cec0..f090a77 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -24,6 +25,7 @@
/* Bluetooth SCO sockets. */
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -41,9 +43,9 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
-#include <linux/security.h>
#include <net/sock.h>
+#include <asm/system.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
@@ -61,7 +63,7 @@
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err);
+static int sco_conn_del(struct hci_conn *conn, int err, u8 is_process);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -134,7 +136,7 @@
return sk;
}
-static int sco_conn_del(struct hci_conn *hcon, int err)
+static int sco_conn_del(struct hci_conn *hcon, int err, u8 is_process)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
@@ -147,10 +149,16 @@
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
- bh_lock_sock(sk);
+ if (is_process)
+ lock_sock(sk);
+ else
+ bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
- bh_unlock_sock(sk);
+ if (is_process)
+ release_sock(sk);
+ else
+ bh_unlock_sock(sk);
sco_sock_kill(sk);
}
@@ -173,7 +181,7 @@
return err;
}
-static int sco_connect(struct sock *sk)
+static int sco_connect(struct sock *sk, __s8 is_wbs)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -189,21 +197,35 @@
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock(hdev);
+ hci_dev_lock_bh(hdev);
- if (lmp_esco_capable(hdev) && !disable_esco)
+ hdev->is_wbs = is_wbs;
+
+ if (lmp_esco_capable(hdev) && !disable_esco) {
type = ESCO_LINK;
- else {
+ } else if (is_wbs) {
+ return -ENAVAIL;
+ } else {
type = SCO_LINK;
pkt_type &= SCO_ESCO_MASK;
}
- hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ BT_DBG("type: %d, pkt_type: 0x%x", type, pkt_type);
+
+ hcon = hci_connect(hdev, type, pkt_type, dst,
+ BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
+ if (is_wbs && (hcon->type != ESCO_LINK)) {
+ BT_ERR("WBS [ hcon->type: 0x%x, hcon->pkt_type: 0x%x ]",
+ hcon->type, hcon->pkt_type);
+ err = -EREMOTEIO;
+ goto done;
+ }
+
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
@@ -227,7 +249,7 @@
}
done:
- hci_dev_unlock(hdev);
+ hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
@@ -372,15 +394,6 @@
case BT_CONNECTED:
case BT_CONFIG:
- if (sco_pi(sk)->conn) {
- sk->sk_state = BT_DISCONN;
- sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
- hci_conn_put(sco_pi(sk)->conn->hcon);
- sco_pi(sk)->conn->hcon = NULL;
- } else
- sco_chan_del(sk, ECONNRESET);
- break;
-
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
@@ -406,10 +419,8 @@
{
BT_DBG("sk %p", sk);
- if (parent) {
+ if (parent)
sk->sk_type = parent->sk_type;
- security_sk_clone(parent, sk);
- }
}
static struct proto sco_proto = {
@@ -488,7 +499,7 @@
goto done;
}
- write_lock(&sco_sk_list.lock);
+ write_lock_bh(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
@@ -499,7 +510,7 @@
sk->sk_state = BT_BOUND;
}
- write_unlock(&sco_sk_list.lock);
+ write_unlock_bh(&sco_sk_list.lock);
done:
release_sock(sk);
@@ -537,7 +548,7 @@
bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
sco_pi(sk)->pkt_type = sa.sco_pkt_type;
- err = sco_connect(sk);
+ err = sco_connect(sk, sa.is_wbs);
if (err)
goto done;
@@ -581,26 +592,30 @@
lock_sock(sk);
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (1) {
+ while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
- ch = bt_accept_dequeue(sk, newsock);
- if (ch)
- break;
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
- if (!timeo) {
- err = -EAGAIN;
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
@@ -608,12 +623,8 @@
err = sock_intr_errno(timeo);
break;
}
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -846,9 +857,7 @@
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
-
- if (conn->hcon)
- hci_conn_put(conn->hcon);
+ hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
@@ -908,12 +917,15 @@
}
/* ----- SCO interface with lower layer (HCI) ----- */
-int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
+static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
register struct sock *sk;
struct hlist_node *node;
int lm = 0;
+ if (type != SCO_LINK && type != ESCO_LINK)
+ return 0;
+
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets */
@@ -933,9 +945,13 @@
return lm;
}
-int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
+
+ if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+ return -EINVAL;
+
if (!status) {
struct sco_conn *conn;
@@ -943,20 +959,24 @@
if (conn)
sco_conn_ready(conn);
} else
- sco_conn_del(hcon, bt_to_errno(status));
+ sco_conn_del(hcon, bt_err(status), 0);
return 0;
}
-int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason, __u8 is_process)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- sco_conn_del(hcon, bt_to_errno(reason));
+ if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+ return -EINVAL;
+
+ sco_conn_del(hcon, bt_err(reason), is_process);
+
return 0;
}
-int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
@@ -980,14 +1000,14 @@
struct sock *sk;
struct hlist_node *node;
- read_lock(&sco_sk_list.lock);
+ read_lock_bh(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
- read_unlock(&sco_sk_list.lock);
+ read_unlock_bh(&sco_sk_list.lock);
return 0;
}
@@ -1032,6 +1052,15 @@
.create = sco_sock_create,
};
+static struct hci_proto sco_hci_proto = {
+ .name = "SCO",
+ .id = HCI_PROTO_SCO,
+ .connect_ind = sco_connect_ind,
+ .connect_cfm = sco_connect_cfm,
+ .disconn_cfm = sco_disconn_cfm,
+ .recv_scodata = sco_recv_scodata
+};
+
int __init sco_init(void)
{
int err;
@@ -1046,6 +1075,13 @@
goto error;
}
+ err = hci_register_proto(&sco_hci_proto);
+ if (err < 0) {
+ BT_ERR("SCO protocol registration failed");
+ bt_sock_unregister(BTPROTO_SCO);
+ goto error;
+ }
+
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
@@ -1069,6 +1105,9 @@
if (bt_sock_unregister(BTPROTO_SCO) < 0)
BT_ERR("SCO socket unregistration failed");
+ if (hci_unregister_proto(&sco_hci_proto) < 0)
+ BT_ERR("SCO protocol unregistration failed");
+
proto_unregister(&sco_proto);
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index deb1198..6bbb34b 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,16 +20,31 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
#include <linux/crypto.h>
-#include <linux/scatterlist.h>
#include <crypto/b128ops.h>
+#include <asm/unaligned.h>
-#define SMP_TIMEOUT msecs_to_jiffies(30000)
+#define SMP_TIMEOUT 30000 /* 30 seconds */
+
+#define SMP_MIN_CONN_INTERVAL 40 /* 50ms (40 * 1.25ms) */
+#define SMP_MAX_CONN_INTERVAL 56 /* 70ms (56 * 1.25ms) */
+#define SMP_MAX_CONN_LATENCY 0 /* 0ms (0 * 1.25ms) */
+#define SMP_SUPERVISION_TIMEOUT 500 /* 5 seconds (500 * 10ms) */
+
+#ifndef FALSE
+#define FALSE 0
+#define TRUE (!FALSE)
+#endif
+
+static int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
static inline void swap128(u8 src[16], u8 dst[16])
{
@@ -147,7 +162,7 @@
}
static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
- u16 dlen, void *data)
+ u16 dlen, void *data)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
@@ -182,28 +197,25 @@
if (!skb)
return;
- skb->priority = HCI_PRIO_MAX;
- hci_send_acl(conn->hchan, skb, 0);
-
- cancel_delayed_work_sync(&conn->security_timer);
- schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
+ hci_send_acl(conn->hcon, NULL, skb, 0);
}
static __u8 authreq_to_seclevel(__u8 authreq)
{
if (authreq & SMP_AUTH_MITM)
return BT_SECURITY_HIGH;
- else
+ else if (authreq & SMP_AUTH_BONDING)
return BT_SECURITY_MEDIUM;
+ else
+ return BT_SECURITY_LOW;
}
-static __u8 seclevel_to_authreq(__u8 sec_level)
+static __u8 seclevel_to_authreq(__u8 level)
{
- switch (sec_level) {
+ switch (level) {
case BT_SECURITY_HIGH:
return SMP_AUTH_MITM | SMP_AUTH_BONDING;
- case BT_SECURITY_MEDIUM:
- return SMP_AUTH_BONDING;
+
default:
return SMP_AUTH_NONE;
}
@@ -214,583 +226,570 @@
struct smp_cmd_pairing *rsp,
__u8 authreq)
{
+ struct hci_conn *hcon = conn->hcon;
+ u8 all_keys = 0;
u8 dist_keys = 0;
- if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
- dist_keys = SMP_DIST_ENC_KEY;
- authreq |= SMP_AUTH_BONDING;
- } else {
- authreq &= ~SMP_AUTH_BONDING;
- }
+ dist_keys = SMP_DIST_ENC_KEY;
+ authreq |= SMP_AUTH_BONDING;
+
+ BT_DBG("conn->hcon->io_capability:%d", conn->hcon->io_capability);
if (rsp == NULL) {
req->io_capability = conn->hcon->io_capability;
- req->oob_flag = SMP_OOB_NOT_PRESENT;
+ req->oob_flag = hcon->oob ? SMP_OOB_PRESENT :
+ SMP_OOB_NOT_PRESENT;
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- req->init_key_dist = 0;
+ req->init_key_dist = all_keys;
req->resp_key_dist = dist_keys;
req->auth_req = authreq;
+ BT_DBG("SMP_CMD_PAIRING_REQ %d %d %d %d %2.2x %2.2x",
+ req->io_capability, req->oob_flag,
+ req->auth_req, req->max_key_size,
+ req->init_key_dist, req->resp_key_dist);
return;
}
+ /* Only request OOB if remote AND we support it */
+ if (req->oob_flag)
+ rsp->oob_flag = hcon->oob ? SMP_OOB_PRESENT :
+ SMP_OOB_NOT_PRESENT;
+ else
+ rsp->oob_flag = SMP_OOB_NOT_PRESENT;
+
rsp->io_capability = conn->hcon->io_capability;
- rsp->oob_flag = SMP_OOB_NOT_PRESENT;
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- rsp->init_key_dist = 0;
+ rsp->init_key_dist = req->init_key_dist & all_keys;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
rsp->auth_req = authreq;
+ BT_DBG("SMP_CMD_PAIRING_RSP %d %d %d %d %2.2x %2.2x",
+ req->io_capability, req->oob_flag, req->auth_req,
+ req->max_key_size, req->init_key_dist,
+ req->resp_key_dist);
}
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
{
- struct smp_chan *smp = conn->smp_chan;
+ struct hci_conn *hcon = conn->hcon;
if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
(max_key_size < SMP_MIN_ENC_KEY_SIZE))
return SMP_ENC_KEY_SIZE;
- smp->enc_key_size = max_key_size;
+ hcon->smp_key_size = max_key_size;
return 0;
}
-static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
-{
- struct hci_conn *hcon = conn->hcon;
-
- if (send)
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
- &reason);
-
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
- mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
- hcon->dst_type, reason);
-
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
- cancel_delayed_work_sync(&conn->security_timer);
- smp_chan_destroy(conn);
- }
-}
-
-#define JUST_WORKS 0x00
-#define JUST_CFM 0x01
-#define REQ_PASSKEY 0x02
-#define CFM_PASSKEY 0x03
-#define REQ_OOB 0x04
-#define OVERLAP 0xFF
-
-static const u8 gen_method[5][5] = {
- { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
- { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
- { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
- { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM },
- { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
+#define JUST_WORKS SMP_JUST_WORKS
+#define REQ_PASSKEY SMP_REQ_PASSKEY
+#define CFM_PASSKEY SMP_CFM_PASSKEY
+#define JUST_CFM SMP_JUST_CFM
+#define OVERLAP SMP_OVERLAP
+static const u8 gen_method[5][5] = {
+ {JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY},
+ {JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY},
+ {CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY},
+ {JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM},
+ {CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP}
};
static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
u8 local_io, u8 remote_io)
{
struct hci_conn *hcon = conn->hcon;
- struct smp_chan *smp = conn->smp_chan;
u8 method;
u32 passkey = 0;
int ret = 0;
- /* Initialize key for JUST WORKS */
- memset(smp->tk, 0, sizeof(smp->tk));
- clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+ /* Initialize key to JUST WORKS */
+ memset(hcon->tk, 0, sizeof(hcon->tk));
+ hcon->tk_valid = FALSE;
+ hcon->auth = auth;
+
+ /* By definition, OOB data will be used if both sides have it available
+ */
+ if (remote_oob && hcon->oob) {
+ method = SMP_REQ_OOB;
+ goto agent_request;
+ }
BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
/* If neither side wants MITM, use JUST WORKS */
- /* If either side has unknown io_caps, use JUST WORKS */
- /* Otherwise, look up method from the table */
+ /* If either side has unknown io_caps, use JUST_WORKS */
if (!(auth & SMP_AUTH_MITM) ||
local_io > SMP_IO_KEYBOARD_DISPLAY ||
- remote_io > SMP_IO_KEYBOARD_DISPLAY)
- method = JUST_WORKS;
- else
- method = gen_method[remote_io][local_io];
-
- /* If not bonding, don't ask user to confirm a Zero TK */
- if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
- method = JUST_WORKS;
-
- /* If Just Works, Continue with Zero TK */
- if (method == JUST_WORKS) {
- set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+ remote_io > SMP_IO_KEYBOARD_DISPLAY) {
+ hcon->auth &= ~SMP_AUTH_MITM;
+ hcon->tk_valid = TRUE;
return 0;
}
- /* Not Just Works/Confirm results in MITM Authentication */
- if (method != JUST_CFM)
- set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags);
+ /* MITM is now officially requested, but not required */
+ /* Determine what we need (if anything) from the agent */
+ method = gen_method[local_io][remote_io];
- /* If both devices have Keyoard-Display I/O, the master
- * Confirms and the slave Enters the passkey.
- */
- if (method == OVERLAP) {
+ BT_DBG("tk_method: %d", method);
+
+ if (method == SMP_JUST_WORKS || method == SMP_JUST_CFM)
+ hcon->auth &= ~SMP_AUTH_MITM;
+
+ /* Don't bother confirming unbonded JUST_WORKS */
+ if (!(auth & SMP_AUTH_BONDING) && method == SMP_JUST_CFM) {
+ hcon->tk_valid = TRUE;
+ return 0;
+ } else if (method == SMP_JUST_WORKS) {
+ hcon->tk_valid = TRUE;
+ return 0;
+ } else if (method == SMP_OVERLAP) {
if (hcon->link_mode & HCI_LM_MASTER)
- method = CFM_PASSKEY;
+ method = SMP_CFM_PASSKEY;
else
- method = REQ_PASSKEY;
+ method = SMP_REQ_PASSKEY;
}
- /* Generate random passkey. Not valid until confirmed. */
- if (method == CFM_PASSKEY) {
- u8 key[16];
+ BT_DBG("tk_method-2: %d", method);
+ if (method == SMP_CFM_PASSKEY) {
+ u8 key[16];
+ /* Generate a passkey for display. It is not valid until
+ * confirmed.
+ */
memset(key, 0, sizeof(key));
get_random_bytes(&passkey, sizeof(passkey));
passkey %= 1000000;
put_unaligned_le32(passkey, key);
- swap128(key, smp->tk);
+ swap128(key, hcon->tk);
BT_DBG("PassKey: %d", passkey);
}
+agent_request:
hci_dev_lock(hcon->hdev);
- if (method == REQ_PASSKEY)
- ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
- hcon->type, hcon->dst_type);
- else
- ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
- hcon->type, hcon->dst_type,
- cpu_to_le32(passkey), 0);
+ switch (method) {
+ case SMP_REQ_PASSKEY:
+ ret = mgmt_user_confirm_request(hcon->hdev->id,
+ HCI_EV_USER_PASSKEY_REQUEST, conn->dst, 0);
+ break;
+ case SMP_CFM_PASSKEY:
+ default:
+ ret = mgmt_user_confirm_request(hcon->hdev->id,
+ HCI_EV_USER_CONFIRM_REQUEST, conn->dst, passkey);
+ break;
+ }
hci_dev_unlock(hcon->hdev);
return ret;
}
-static void confirm_work(struct work_struct *work)
+static int send_pairing_confirm(struct l2cap_conn *conn)
{
- struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
- struct l2cap_conn *conn = smp->conn;
- struct crypto_blkcipher *tfm;
+ struct hci_conn *hcon = conn->hcon;
+ struct crypto_blkcipher *tfm = hcon->hdev->tfm;
struct smp_cmd_pairing_confirm cp;
int ret;
- u8 res[16], reason;
-
- BT_DBG("conn %p", conn);
-
- tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- reason = SMP_UNSPECIFIED;
- goto error;
- }
-
- smp->tfm = tfm;
+ u8 res[16];
if (conn->hcon->out)
- ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst, res);
+ ret = smp_c1(tfm, hcon->tk, hcon->prnd, hcon->preq, hcon->prsp,
+ 0, conn->src, hcon->dst_type, conn->dst, res);
else
- ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
- conn->hcon->dst_type, conn->dst, 0, conn->src,
- res);
- if (ret) {
- reason = SMP_UNSPECIFIED;
- goto error;
- }
+ ret = smp_c1(tfm, hcon->tk, hcon->prnd, hcon->preq, hcon->prsp,
+ hcon->dst_type, conn->dst, 0, conn->src, res);
- clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+ if (ret)
+ return SMP_CONFIRM_FAILED;
swap128(res, cp.confirm_val);
- smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
- return;
+ hcon->cfm_pending = FALSE;
-error:
- smp_failure(conn, reason, 1);
-}
-
-static void random_work(struct work_struct *work)
-{
- struct smp_chan *smp = container_of(work, struct smp_chan, random);
- struct l2cap_conn *conn = smp->conn;
- struct hci_conn *hcon = conn->hcon;
- struct crypto_blkcipher *tfm = smp->tfm;
- u8 reason, confirm[16], res[16], key[16];
- int ret;
-
- if (IS_ERR_OR_NULL(tfm)) {
- reason = SMP_UNSPECIFIED;
- goto error;
- }
-
- BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
-
- if (hcon->out)
- ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
- conn->src, hcon->dst_type, conn->dst, res);
- else
- ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
- hcon->dst_type, conn->dst, 0, conn->src, res);
- if (ret) {
- reason = SMP_UNSPECIFIED;
- goto error;
- }
-
- swap128(res, confirm);
-
- if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
- BT_ERR("Pairing failed (confirmation values mismatch)");
- reason = SMP_CONFIRM_FAILED;
- goto error;
- }
-
- if (hcon->out) {
- u8 stk[16], rand[8];
- __le16 ediv;
-
- memset(rand, 0, sizeof(rand));
- ediv = 0;
-
- smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
- swap128(key, stk);
-
- memset(stk + smp->enc_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
-
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) {
- reason = SMP_UNSPECIFIED;
- goto error;
- }
-
- hci_le_start_enc(hcon, ediv, rand, stk);
- hcon->enc_key_size = smp->enc_key_size;
- } else {
- u8 stk[16], r[16], rand[8];
- __le16 ediv;
-
- memset(rand, 0, sizeof(rand));
- ediv = 0;
-
- swap128(smp->prnd, r);
- smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
-
- smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
- swap128(key, stk);
-
- memset(stk + smp->enc_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
-
- hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
- HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
- ediv, rand);
- }
-
- return;
-
-error:
- smp_failure(conn, reason, 1);
-}
-
-static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
-{
- struct smp_chan *smp;
-
- smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
- if (!smp)
- return NULL;
-
- INIT_WORK(&smp->confirm, confirm_work);
- INIT_WORK(&smp->random, random_work);
-
- smp->conn = conn;
- conn->smp_chan = smp;
- conn->hcon->smp_conn = conn;
-
- hci_conn_hold(conn->hcon);
-
- return smp;
-}
-
-void smp_chan_destroy(struct l2cap_conn *conn)
-{
- struct smp_chan *smp = conn->smp_chan;
-
- BUG_ON(!smp);
-
- if (smp->tfm)
- crypto_free_blkcipher(smp->tfm);
-
- kfree(smp);
- conn->smp_chan = NULL;
- conn->hcon->smp_conn = NULL;
- hci_conn_put(conn->hcon);
-}
-
-int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
-{
- struct l2cap_conn *conn = hcon->smp_conn;
- struct smp_chan *smp;
- u32 value;
- u8 key[16];
-
- BT_DBG("");
-
- if (!conn)
- return -ENOTCONN;
-
- smp = conn->smp_chan;
-
- switch (mgmt_op) {
- case MGMT_OP_USER_PASSKEY_REPLY:
- value = le32_to_cpu(passkey);
- memset(key, 0, sizeof(key));
- BT_DBG("PassKey: %d", value);
- put_unaligned_le32(value, key);
- swap128(key, smp->tk);
- /* Fall Through */
- case MGMT_OP_USER_CONFIRM_REPLY:
- set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
- break;
- case MGMT_OP_USER_PASSKEY_NEG_REPLY:
- case MGMT_OP_USER_CONFIRM_NEG_REPLY:
- smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
- return 0;
- default:
- smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
- return -EOPNOTSUPP;
- }
-
- /* If it is our turn to send Pairing Confirm, do so now */
- if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags))
- queue_work(hcon->hdev->workqueue, &smp->confirm);
+ smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
return 0;
}
+int le_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, void *cp)
+{
+ struct mgmt_cp_user_passkey_reply *psk_reply = cp;
+ struct l2cap_conn *conn = hcon->smp_conn;
+ u8 key[16];
+ u8 reason = 0;
+ int ret = 0;
+
+ BT_DBG("");
+
+ hcon->tk_valid = TRUE;
+
+ switch (mgmt_op) {
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ reason = SMP_CONFIRM_FAILED;
+ break;
+ case MGMT_OP_USER_CONFIRM_REPLY:
+ break;
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ memset(key, 0, sizeof(key));
+ BT_DBG("PassKey: %d", psk_reply->passkey);
+ put_unaligned_le32(psk_reply->passkey, key);
+ swap128(key, hcon->tk);
+ break;
+ default:
+ reason = SMP_CONFIRM_FAILED;
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (reason) {
+ BT_DBG("smp_send_cmd: SMP_CMD_PAIRING_FAIL");
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+ &reason);
+ del_timer(&hcon->smp_timer);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+ mgmt_auth_failed(hcon->hdev->id, conn->dst, reason);
+ hci_conn_put(hcon);
+ } else if (hcon->cfm_pending) {
+ BT_DBG("send_pairing_confirm");
+ ret = send_pairing_confirm(conn);
+ }
+
+ return ret;
+}
+
static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
- struct smp_chan *smp;
u8 key_size;
u8 auth = SMP_AUTH_NONE;
int ret;
BT_DBG("conn %p", conn);
- if (conn->hcon->link_mode & HCI_LM_MASTER)
- return SMP_CMD_NOTSUPP;
-
- if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
- smp = smp_chan_create(conn);
-
- smp = conn->smp_chan;
-
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], req, sizeof(*req));
+ hcon->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&hcon->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
- /* We didn't start the pairing, so match remote */
- if (req->auth_req & SMP_AUTH_BONDING)
- auth = req->auth_req;
+ if (req->oob_flag && hcon->oob) {
+ /* By definition, OOB data pairing will have MITM protection */
+ auth = req->auth_req | SMP_AUTH_MITM;
+ } else if (req->auth_req & SMP_AUTH_BONDING) {
+ /* We will attempt MITM for all Bonding attempts */
+ auth = SMP_AUTH_BONDING | SMP_AUTH_MITM;
+ }
- conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
-
+ /* We didn't start the pairing, so no requirements */
build_pairing_cmd(conn, req, &rsp, auth);
key_size = min(req->max_key_size, rsp.max_key_size);
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
- ret = smp_rand(smp->prnd);
+ ret = smp_rand(hcon->prnd);
if (ret)
return SMP_UNSPECIFIED;
- smp->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
+ /* Request setup of TK */
+ ret = tk_request(conn, req->oob_flag, auth, rsp.io_capability,
+ req->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ hcon->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&hcon->prsp[1], &rsp, sizeof(rsp));
smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
- /* Request setup of TK */
- ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
- if (ret)
- return SMP_UNSPECIFIED;
+ mod_timer(&hcon->smp_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT));
return 0;
}
static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
- struct hci_dev *hdev = conn->hcon->hdev;
u8 key_size, auth = SMP_AUTH_NONE;
int ret;
BT_DBG("conn %p", conn);
- if (!(conn->hcon->link_mode & HCI_LM_MASTER))
- return SMP_CMD_NOTSUPP;
-
skb_pull(skb, sizeof(*rsp));
- req = (void *) &smp->preq[1];
+ req = (void *) &hcon->preq[1];
key_size = min(req->max_key_size, rsp->max_key_size);
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
- ret = smp_rand(smp->prnd);
+ hcon->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&hcon->prsp[1], rsp, sizeof(*rsp));
+
+ ret = smp_rand(hcon->prnd);
if (ret)
return SMP_UNSPECIFIED;
- smp->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
-
if ((req->auth_req & SMP_AUTH_BONDING) &&
(rsp->auth_req & SMP_AUTH_BONDING))
auth = SMP_AUTH_BONDING;
auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
- ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+ ret = tk_request(conn, req->oob_flag, auth, rsp->io_capability,
+ req->io_capability);
if (ret)
return SMP_UNSPECIFIED;
- set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+ hcon->cfm_pending = TRUE;
/* Can't compose response until we have been confirmed */
- if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
+ if (!hcon->tk_valid)
return 0;
- queue_work(hdev->workqueue, &smp->confirm);
+ ret = send_pairing_confirm(conn);
+ if (ret)
+ return SMP_CONFIRM_FAILED;
return 0;
}
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct smp_chan *smp = conn->smp_chan;
- struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+ int ret;
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
- memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
- skb_pull(skb, sizeof(smp->pcnf));
+ memcpy(hcon->pcnf, skb->data, sizeof(hcon->pcnf));
+ skb_pull(skb, sizeof(hcon->pcnf));
if (conn->hcon->out) {
u8 random[16];
- swap128(smp->prnd, random);
+ swap128(hcon->prnd, random);
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
random);
- } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
- queue_work(hdev->workqueue, &smp->confirm);
- } else {
- set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
- }
+ } else if (hcon->tk_valid) {
+ ret = send_pairing_confirm(conn);
+
+ if (ret)
+ return SMP_CONFIRM_FAILED;
+ } else
+ hcon->cfm_pending = TRUE;
+
+
+ mod_timer(&hcon->smp_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT));
return 0;
}
static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct smp_chan *smp = conn->smp_chan;
- struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+ struct crypto_blkcipher *tfm = hcon->hdev->tfm;
+ int ret;
+ u8 key[16], res[16], random[16], confirm[16];
- BT_DBG("conn %p", conn);
+ swap128(skb->data, random);
+ skb_pull(skb, sizeof(random));
- swap128(skb->data, smp->rrnd);
- skb_pull(skb, sizeof(smp->rrnd));
+ if (conn->hcon->out)
+ ret = smp_c1(tfm, hcon->tk, random, hcon->preq, hcon->prsp, 0,
+ conn->src, hcon->dst_type, conn->dst,
+ res);
+ else
+ ret = smp_c1(tfm, hcon->tk, random, hcon->preq, hcon->prsp,
+ hcon->dst_type, conn->dst, 0, conn->src,
+ res);
+ if (ret)
+ return SMP_UNSPECIFIED;
- queue_work(hdev->workqueue, &smp->random);
+ BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+ swap128(res, confirm);
+
+ if (memcmp(hcon->pcnf, confirm, sizeof(hcon->pcnf)) != 0) {
+ BT_ERR("Pairing failed (confirmation values mismatch)");
+ return SMP_CONFIRM_FAILED;
+ }
+
+ if (conn->hcon->out) {
+ u8 stk[16], rand[8];
+ __le16 ediv;
+
+ memset(rand, 0, sizeof(rand));
+ ediv = 0;
+
+ smp_s1(tfm, hcon->tk, random, hcon->prnd, key);
+ swap128(key, stk);
+
+ memset(stk + hcon->smp_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - hcon->smp_key_size);
+
+ hci_le_start_enc(hcon, ediv, rand, stk);
+ hcon->enc_key_size = hcon->smp_key_size;
+ } else {
+ u8 stk[16], r[16], rand[8];
+ __le16 ediv;
+
+ memset(rand, 0, sizeof(rand));
+ ediv = 0;
+
+ swap128(hcon->prnd, r);
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+
+ smp_s1(tfm, hcon->tk, hcon->prnd, random, key);
+ swap128(key, stk);
+
+ memset(stk + hcon->smp_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - hcon->smp_key_size);
+
+ hci_add_ltk(conn->hcon->hdev, 0, conn->dst, hcon->dst_type,
+ hcon->smp_key_size, hcon->auth, ediv, rand, stk);
+ }
return 0;
}
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+static int smp_encrypt_link(struct hci_conn *hcon, struct link_key *key)
{
- struct smp_ltk *key;
- struct hci_conn *hcon = conn->hcon;
+ struct key_master_id *master;
+ u8 sec_level;
+ u8 zerobuf[8];
- key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
- if (!key)
- return 0;
+ if (!hcon || !key || !key->data)
+ return -EINVAL;
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
- return 1;
+ memset(zerobuf, 0, sizeof(zerobuf));
- hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
- hcon->enc_key_size = key->enc_size;
+ master = (void *) key->data;
- return 1;
+ if (!master->ediv && !memcmp(master->rand, zerobuf, sizeof(zerobuf)))
+ return -EINVAL;
+ hcon->enc_key_size = key->pin_len;
+ hcon->sec_req = TRUE;
+ sec_level = authreq_to_seclevel(key->auth);
+
+ BT_DBG("cur %d, req: %d", hcon->sec_level, sec_level);
+
+ if (sec_level > hcon->sec_level)
+ hcon->pending_sec_level = sec_level;
+
+
+ if (!(hcon->link_mode & HCI_LM_ENCRYPT))
+ hci_conn_hold(hcon);
+
+ hci_le_start_enc(hcon, master->ediv, master->rand, key->val);
+
+ return 0;
}
+
static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
struct smp_cmd_security_req *rp = (void *) skb->data;
struct smp_cmd_pairing cp;
- struct hci_conn *hcon = conn->hcon;
- struct smp_chan *smp;
+ struct link_key *key;
BT_DBG("conn %p", conn);
- hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
-
- if (smp_ltk_encrypt(conn))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
return 0;
- if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
- return 0;
+ key = hci_find_link_key_type(hcon->hdev, conn->dst, KEY_TYPE_LTK);
+ if (key && ((key->auth & SMP_AUTH_MITM) ||
+ !(rp->auth_req & SMP_AUTH_MITM))) {
- smp = smp_chan_create(conn);
+ if (smp_encrypt_link(hcon, key) < 0)
+ goto invalid_key;
+
+ return 0;
+ }
+
+invalid_key:
+ hcon->sec_req = FALSE;
+
+ /* Switch to Pairing Connection Parameters */
+ hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL, SMP_MAX_CONN_INTERVAL,
+ SMP_MAX_CONN_LATENCY, SMP_SUPERVISION_TIMEOUT);
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &cp, sizeof(cp));
+ hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+ hcon->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&hcon->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ mod_timer(&hcon->smp_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT));
+
+ set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+
+ hci_conn_hold(hcon);
+
return 0;
}
int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
{
struct hci_conn *hcon = conn->hcon;
- struct smp_chan *smp = conn->smp_chan;
__u8 authreq;
- BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+ BT_DBG("conn %p hcon %p %d req: %d",
+ conn, hcon, hcon->sec_level, sec_level);
- if (!lmp_host_le_capable(hcon->hdev))
+ if (IS_ERR(hcon->hdev->tfm))
return 1;
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+ return -EINPROGRESS;
+
if (sec_level == BT_SECURITY_LOW)
return 1;
+
if (hcon->sec_level >= sec_level)
return 1;
- if (hcon->link_mode & HCI_LM_MASTER)
- if (smp_ltk_encrypt(conn))
- goto done;
-
- if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
- return 0;
-
- smp = smp_chan_create(conn);
- if (!smp)
- return 1;
-
authreq = seclevel_to_authreq(sec_level);
+ hcon->smp_conn = conn;
+ hcon->pending_sec_level = sec_level;
+
+ if ((hcon->link_mode & HCI_LM_MASTER) && !hcon->sec_req) {
+ struct link_key *key;
+
+ key = hci_find_link_key_type(hcon->hdev, conn->dst,
+ KEY_TYPE_LTK);
+
+ if (smp_encrypt_link(hcon, key) == 0)
+ goto done;
+ }
+
+ hcon->sec_req = FALSE;
+
if (hcon->link_mode & HCI_LM_MASTER) {
struct smp_cmd_pairing cp;
+ /* Switch to Pairing Connection Parameters */
+ hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL,
+ SMP_MAX_CONN_INTERVAL, SMP_MAX_CONN_LATENCY,
+ SMP_SUPERVISION_TIMEOUT);
+
build_pairing_cmd(conn, &cp, NULL, authreq);
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &cp, sizeof(cp));
+ hcon->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&hcon->preq[1], &cp, sizeof(cp));
+
+ mod_timer(&hcon->smp_timer, jiffies +
+ msecs_to_jiffies(SMP_TIMEOUT));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ hci_conn_hold(hcon);
} else {
struct smp_cmd_security_req cp;
cp.auth_req = authreq;
@@ -798,56 +797,81 @@
}
done:
- hcon->pending_sec_level = sec_level;
+ set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
return 0;
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
struct smp_cmd_encrypt_info *rp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
+ u8 rand[8];
+ int err;
skb_pull(skb, sizeof(*rp));
- memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
+ BT_DBG("conn %p", conn);
+
+ memset(rand, 0, sizeof(rand));
+
+ err = hci_add_ltk(hcon->hdev, 0, conn->dst, hcon->dst_type,
+ 0, 0, 0, rand, rp->ltk);
+ if (err)
+ return SMP_UNSPECIFIED;
return 0;
}
static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct smp_cmd_master_ident *rp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
- struct hci_dev *hdev = conn->hcon->hdev;
struct hci_conn *hcon = conn->hcon;
- u8 authenticated;
+ struct smp_cmd_master_ident *rp = (void *) skb->data;
+ struct smp_cmd_pairing *paircmd = (void *) &hcon->prsp[1];
+ struct link_key *key;
+ u8 *keydist;
skb_pull(skb, sizeof(*rp));
- hci_dev_lock(hdev);
- authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
- hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
- HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
- rp->ediv, rp->rand);
- smp_distribute_keys(conn, 1);
- hci_dev_unlock(hdev);
+ key = hci_find_link_key_type(hcon->hdev, conn->dst, KEY_TYPE_LTK);
+ if (key == NULL)
+ return SMP_UNSPECIFIED;
+
+ if (hcon->out)
+ keydist = &paircmd->resp_key_dist;
+ else
+ keydist = &paircmd->init_key_dist;
+
+ BT_DBG("keydist 0x%x", *keydist);
+
+ hci_add_ltk(hcon->hdev, 1, conn->dst, hcon->dst_type,
+ hcon->smp_key_size, hcon->auth, rp->ediv,
+ rp->rand, key->val);
+
+ *keydist &= ~SMP_DIST_ENC_KEY;
+ if (hcon->out) {
+ if (!(*keydist))
+ smp_distribute_keys(conn, 1);
+ }
return 0;
}
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
__u8 code = skb->data[0];
__u8 reason;
int err = 0;
- if (!lmp_host_le_capable(conn->hcon->hdev)) {
- err = -ENOTSUPP;
+ if (IS_ERR(hcon->hdev->tfm)) {
+ err = PTR_ERR(hcon->hdev->tfm);
reason = SMP_PAIRING_NOTSUPP;
+ BT_ERR("SMP_PAIRING_NOTSUPP %p", hcon->hdev->tfm);
goto done;
}
+ hcon->smp_conn = conn;
skb_pull(skb, sizeof(code));
switch (code) {
@@ -856,9 +880,12 @@
break;
case SMP_CMD_PAIRING_FAIL:
- smp_failure(conn, skb->data[0], 0);
reason = 0;
err = -EPERM;
+ del_timer(&hcon->smp_timer);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+ mgmt_auth_failed(hcon->hdev->id, conn->dst, skb->data[0]);
+ hci_conn_put(hcon);
break;
case SMP_CMD_PAIRING_RSP:
@@ -901,33 +928,40 @@
}
done:
- if (reason)
- smp_failure(conn, reason, 1);
+ if (reason) {
+ BT_ERR("SMP_CMD_PAIRING_FAIL: %d", reason);
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+ &reason);
+ del_timer(&hcon->smp_timer);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+ mgmt_auth_failed(hcon->hdev->id, conn->dst, reason);
+ hci_conn_put(hcon);
+ }
kfree_skb(skb);
return err;
}
-int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
+static int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
{
+ struct hci_conn *hcon = conn->hcon;
struct smp_cmd_pairing *req, *rsp;
- struct smp_chan *smp = conn->smp_chan;
__u8 *keydist;
BT_DBG("conn %p force %d", conn, force);
- if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
- return 0;
+ if (IS_ERR(hcon->hdev->tfm))
+ return PTR_ERR(hcon->hdev->tfm);
- rsp = (void *) &smp->prsp[1];
+ rsp = (void *) &hcon->prsp[1];
/* The responder sends its keys first */
- if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
+ if (!force && hcon->out && (rsp->resp_key_dist & 0x07))
return 0;
- req = (void *) &smp->preq[1];
+ req = (void *) &hcon->preq[1];
- if (conn->hcon->out) {
+ if (hcon->out) {
keydist = &rsp->init_key_dist;
*keydist &= req->init_key_dist;
} else {
@@ -941,8 +975,6 @@
if (*keydist & SMP_DIST_ENC_KEY) {
struct smp_cmd_encrypt_info enc;
struct smp_cmd_master_ident ident;
- struct hci_conn *hcon = conn->hcon;
- u8 authenticated;
__le16 ediv;
get_random_bytes(enc.ltk, sizeof(enc.ltk));
@@ -951,10 +983,9 @@
smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
- authenticated = hcon->sec_level == BT_SECURITY_HIGH;
- hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
- HCI_SMP_LTK_SLAVE, 1, authenticated,
- enc.ltk, smp->enc_key_size, ediv, ident.rand);
+ hci_add_ltk(hcon->hdev, 1, conn->dst, hcon->dst_type,
+ hcon->smp_key_size, hcon->auth, ediv,
+ ident.rand, enc.ltk);
ident.ediv = cpu_to_le16(ediv);
@@ -993,11 +1024,55 @@
*keydist &= ~SMP_DIST_SIGN;
}
- if (conn->hcon->out || force) {
- clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
- cancel_delayed_work_sync(&conn->security_timer);
- smp_chan_destroy(conn);
+ if (hcon->out) {
+ if (hcon->disconn_cfm_cb)
+ hcon->disconn_cfm_cb(hcon, 0);
+ del_timer(&hcon->smp_timer);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+ hci_conn_put(hcon);
+ } else if (rsp->resp_key_dist) {
+ if (hcon->disconn_cfm_cb)
+ hcon->disconn_cfm_cb(hcon, SMP_UNSPECIFIED);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+ mgmt_auth_failed(hcon->hdev->id, conn->dst, SMP_UNSPECIFIED);
+ hci_conn_put(hcon);
}
return 0;
}
+
+int smp_link_encrypt_cmplt(struct l2cap_conn *conn, u8 status, u8 encrypt)
+{
+ struct hci_conn *hcon = conn->hcon;
+
+ BT_DBG("smp: %d %d %d", status, encrypt, hcon->sec_req);
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+
+ if (!status && encrypt && hcon->sec_level < hcon->pending_sec_level)
+ hcon->sec_level = hcon->pending_sec_level;
+
+ if (!status && encrypt && !hcon->sec_req)
+ return smp_distribute_keys(conn, 0);
+
+ /* Fall back to Pairing request if failed a Link Security request */
+ else if (hcon->sec_req && (status || !encrypt))
+ smp_conn_security(conn, hcon->pending_sec_level);
+
+ hci_conn_put(hcon);
+
+ return 0;
+}
+
+void smp_timeout(unsigned long arg)
+{
+ struct l2cap_conn *conn = (void *) arg;
+ u8 reason = SMP_UNSPECIFIED;
+
+ BT_DBG("%p", conn);
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
+ mgmt_auth_failed(conn->hcon->hdev->id, conn->dst, SMP_UNSPECIFIED);
+ hci_conn_put(conn->hcon);
+}