Bluetooth: Backport BT manager 1.3
Backported from the msm-3.10 kernel.

This brings in the Bluedroid-oriented bluesleep driver (lpm/btwrite proc
interface, wakelocks and device-tree support), device-tree and regulator
handling in bluetooth-power, and the Three-wire UART (H5) protocol in
place of HCI_IBS, while dropping the obsolete HCI SMD driver. The ath3k,
bcm203x, bfusb, bluecard and bpa10x drivers pick up the matching
hci_get_drvdata(), module_usb_driver()/module_pcmcia_driver() and devm_*
conversions.
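With BT_BLUEDROID_SUPPORT set, bluesleep no longer hooks kernel HCI
notifier events; a userspace stack drives low-power mode through the new
/proc/bluetooth/sleep/lpm and /proc/bluetooth/sleep/btwrite entries. A
minimal userspace sketch of that handshake (illustrative only; the paths
follow PROC_DIR in the driver, error handling is trimmed):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static void proc_write(const char *path, const char *val)
  {
          int fd = open(path, O_WRONLY);

          if (fd < 0) {
                  perror(path);
                  return;
          }
          if (write(fd, val, strlen(val)) < 0)
                  perror(path);
          close(fd);
  }

  int main(void)
  {
          /* HCI_DEV_REG equivalent: enable lpm, bluesleep grabs the BT UART */
          proc_write("/proc/bluetooth/sleep/lpm", "1");

          /* HCI_DEV_WRITE equivalent: assert BT_WAKE before sending HCI data */
          proc_write("/proc/bluetooth/sleep/btwrite", "1");

          /* ... HCI traffic goes out over the UART here ... */

          /* HCI_DEV_UNREG equivalent: drop lpm when the stack shuts down */
          proc_write("/proc/bluetooth/sleep/lpm", "0");
          return 0;
  }

Writing '1' to lpm maps to the old HCI_DEV_REG handling (bluesleep_start),
btwrite to HCI_DEV_WRITE (bluesleep_outgoing_data), and '0' to lpm to
HCI_DEV_UNREG (bluesleep_stop).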
Change-Id: I0c4ba93e9d590388efd562c3dbb3a2d1ac5f3c6a
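For boards that still register bluesleep without a device tree node,
bluesleep_populate_pinfo() keeps looking the GPIOs and the wakeup IRQ up
by resource name. A hypothetical board-file fragment matching those names
(the GPIO and IRQ numbers are placeholders, not taken from any real
board):

  #include <linux/kernel.h>
  #include <linux/ioport.h>
  #include <linux/platform_device.h>

  static struct resource bluesleep_resources[] = {
          {
                  .name  = "gpio_host_wake",  /* required input GPIO */
                  .start = 42,                /* placeholder GPIO number */
                  .end   = 42,
                  .flags = IORESOURCE_IO,
          },
          {
                  .name  = "gpio_ext_wake",   /* optional; absence clears has_ext_wake */
                  .start = 43,                /* placeholder GPIO number */
                  .end   = 43,
                  .flags = IORESOURCE_IO,
          },
          {
                  .name  = "host_wake",       /* wakeup interrupt, fetched by name */
                  .start = 101,               /* placeholder IRQ number */
                  .end   = 101,
                  .flags = IORESOURCE_IRQ,
          },
  };

  static struct platform_device msm_bluesleep_device = {
          .name           = "bluesleep",      /* must match the platform driver name */
          .id             = -1,
          .num_resources  = ARRAY_SIZE(bluesleep_resources),
          .resource       = bluesleep_resources,
  };

Device tree users instead provide a "qcom,bluesleep" node with the
bt_host_wake/bt_ext_wake named GPIOs parsed by bluesleep_populate_dt_pinfo().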
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index ea1c27a..812ffb2 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -2,16 +2,6 @@
menu "Bluetooth device drivers"
depends on BT
-config BT_HCISMD
- tristate "HCI SMD driver"
- help
- Bluetooth HCI SMD driver.
- This driver is required if you want to use Bluetoth device with
- SMD interface.
-
- Say Y here to compile support for Bluetooth USB devices into the
- kernel or say M to compile is as a module (hci_smd).
-
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
@@ -91,16 +81,17 @@
Say Y here to compile support for HCILL protocol.
-config BT_HCIUART_IBS
- bool "HCI_IBS protocol support"
+config BT_HCIUART_3WIRE
+ bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
- default n
help
- HCI_IBS (HCI In-Band Sleep) is a serial protocol for communication
- between Bluetooth device and host. This protocol is required for
- UART clock control for some Qualcomm Bluetooth devices.
+ The HCI Three-wire UART Transport Layer makes it possible to
+ use the Bluetooth HCI over a serial port interface. The HCI
+ Three-wire UART Transport Layer assumes that the UART
+ communication may have bit errors, overrun errors or burst
+ errors, thereby making CTS/RTS lines unnecessary.
- Say Y here to compile support for HCI_IBS protocol.
+ Say Y here to compile support for Three-wire UART protocol.
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
@@ -128,11 +119,13 @@
config BT_MSM_SLEEP
tristate "MSM Bluesleep driver"
depends on BT && SERIAL_MSM_HS
- default n
help
Bluetooth MSM bluesleep driver.
This driver provides support for BTS sleep.
+ Say Y here to compile support for BTS sleep into the
+ kernel or say M to compile it as a module (msm_bt_sleep).
+
config BT_HCIBFUSB
tristate "HCI BlueFRITZ! USB driver"
depends on USB
@@ -217,7 +210,7 @@
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8897.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -230,20 +223,12 @@
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
-config MSM_BT_POWER
- tristate "MSM Bluetooth Power Control"
- depends on ARCH_MSM && RFKILL
- default m
- help
- Provides a parameter to switch on/off power from PMIC
- to Bluetooth device.
-
config BT_ATH3K
tristate "Atheros firmware download driver"
depends on BT_HCIBTUSB
@@ -266,4 +251,13 @@
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module.
+
+config MSM_BT_POWER
+ tristate "MSM Bluetooth Power Control"
+ depends on ARCH_MSM && RFKILL
+ default m
+ help
+ Provides a parameter to switch on/off power from PMIC
+ to Bluetooth device.
+
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index a20a056..8972a3a 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Linux Bluetooth HCI device drivers.
#
-obj-$(CONFIG_BT_HCISMD) += hci_smd.o
obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o
obj-$(CONFIG_BT_HCIUART) += hci_uart.o
obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o
@@ -20,6 +19,9 @@
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_MSM_BT_POWER) += bluetooth-power.o
+obj-$(CONFIG_BT_MSM_SLEEP) += msm_bt_sleep.o
+msm_bt_sleep-objs := bluesleep.o
btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
@@ -29,8 +31,5 @@
hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
-hci_uart-$(CONFIG_BT_HCIUART_IBS) += hci_ibs.o
+hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
-obj-$(CONFIG_BT_MSM_SLEEP) += msm_bt_sleep.o
-msm_bt_sleep-objs := bluesleep.o
-obj-$(CONFIG_MSM_BT_POWER) += bluetooth-power.o
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 9925c4e..0a327f4 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -30,6 +30,7 @@
#include <net/bluetooth/bluetooth.h>
#define VERSION "1.0"
+#define ATH3K_FIRMWARE "ath3k-1.fw"
#define ATH3K_DNLOAD 0x01
#define ATH3K_GETSTATE 0x05
@@ -160,7 +161,7 @@
pipe = usb_sndctrlpipe(udev, 0);
- send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+ send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
if (!send_buf) {
BT_ERR("Can't allocate memory chunk for firmware");
return -ENOMEM;
@@ -202,24 +203,44 @@
static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{
- int pipe = 0;
+ int ret, pipe = 0;
+ char *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
- state, 0x01, USB_CTRL_SET_TIMEOUT);
+ ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
+
+ *state = *buf;
+ kfree(buf);
+
+ return ret;
}
static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version)
{
- int pipe = 0;
+ int ret, pipe = 0;
+ struct ath3k_version *buf;
+ const int size = sizeof(*buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
- sizeof(struct ath3k_version),
- USB_CTRL_SET_TIMEOUT);
+ ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(version, buf, size);
+ kfree(buf);
+
+ return ret;
}
static int ath3k_load_fwfile(struct usb_device *udev,
@@ -231,7 +252,7 @@
count = firmware->size;
- send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+ send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
if (!send_buf) {
BT_ERR("Can't allocate memory chunk for firmware");
return -ENOMEM;
@@ -368,7 +389,7 @@
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
- BT_ERR("Can't get state to change to load configration err");
+ BT_ERR("Can't get state to change to load configuration err");
return -EBUSY;
}
@@ -455,9 +476,15 @@
return 0;
}
- if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
- BT_ERR("Error loading firmware");
- return -EIO;
+ ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ BT_ERR("Firmware file \"%s\" not found",
+ ATH3K_FIRMWARE);
+ else
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+ ATH3K_FIRMWARE, ret);
+ return ret;
}
ret = ath3k_load_firmware(udev, firmware);
@@ -476,24 +503,13 @@
.probe = ath3k_probe,
.disconnect = ath3k_disconnect,
.id_table = ath3k_table,
+ .disable_hub_initiated_lpm = 1,
};
-static int __init ath3k_init(void)
-{
- BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
- return usb_register(&ath3k_driver);
-}
-
-static void __exit ath3k_exit(void)
-{
- usb_deregister(&ath3k_driver);
-}
-
-module_init(ath3k_init);
-module_exit(ath3k_exit);
+module_usb_driver(ath3k_driver);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("ath3k-1.fw");
+MODULE_FIRMWARE(ATH3K_FIRMWARE);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 8b1b643..364f82b 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -65,6 +66,7 @@
unsigned long state;
struct work_struct work;
+ atomic_t shutdown;
struct urb *urb;
unsigned char *buffer;
@@ -97,6 +99,7 @@
data->state = BCM203X_SELECT_MEMORY;
+ /* use workqueue to have a small delay */
schedule_work(&data->work);
break;
@@ -155,7 +158,10 @@
struct bcm203x_data *data =
container_of(work, struct bcm203x_data, work);
- if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
+ if (atomic_read(&data->shutdown))
+ return;
+
+ if (usb_submit_urb(data->urb, GFP_KERNEL) < 0)
BT_ERR("Can't submit URB");
}
@@ -171,7 +177,7 @@
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
BT_ERR("Can't allocate memory for data structure");
return -ENOMEM;
@@ -183,14 +189,12 @@
data->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!data->urb) {
BT_ERR("Can't allocate URB");
- kfree(data);
return -ENOMEM;
}
if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
BT_ERR("Mini driver request failed");
usb_free_urb(data->urb);
- kfree(data);
return -EIO;
}
@@ -203,7 +207,6 @@
BT_ERR("Can't allocate memory for mini driver");
release_firmware(firmware);
usb_free_urb(data->urb);
- kfree(data);
return -ENOMEM;
}
@@ -218,7 +221,6 @@
BT_ERR("Firmware request failed");
usb_free_urb(data->urb);
kfree(data->buffer);
- kfree(data);
return -EIO;
}
@@ -230,7 +232,6 @@
release_firmware(firmware);
usb_free_urb(data->urb);
kfree(data->buffer);
- kfree(data);
return -ENOMEM;
}
@@ -243,6 +244,7 @@
usb_set_intfdata(intf, data);
+ /* use workqueue to have a small delay */
schedule_work(&data->work);
return 0;
@@ -254,6 +256,9 @@
BT_DBG("intf %p", intf);
+ atomic_inc(&data->shutdown);
+ cancel_work_sync(&data->work);
+
usb_kill_urb(data->urb);
usb_set_intfdata(intf, NULL);
@@ -261,7 +266,6 @@
usb_free_urb(data->urb);
kfree(data->fw_data);
kfree(data->buffer);
- kfree(data);
}
static struct usb_driver bcm203x_driver = {
@@ -269,28 +273,10 @@
.probe = bcm203x_probe,
.disconnect = bcm203x_disconnect,
.id_table = bcm203x_table,
+ .disable_hub_initiated_lpm = 1,
};
-static int __init bcm203x_init(void)
-{
- int err;
-
- BT_INFO("Broadcom Blutonium firmware driver ver %s", VERSION);
-
- err = usb_register(&bcm203x_driver);
- if (err < 0)
- BT_ERR("Failed to register USB driver");
-
- return err;
-}
-
-static void __exit bcm203x_exit(void)
-{
- usb_deregister(&bcm203x_driver);
-}
-
-module_init(bcm203x_init);
-module_exit(bcm203x_exit);
+module_usb_driver(bcm203x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 005919a..995aee9 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -411,7 +411,7 @@
static int bfusb_open(struct hci_dev *hdev)
{
- struct bfusb_data *data = hdev->driver_data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
unsigned long flags;
int i, err;
@@ -437,7 +437,7 @@
static int bfusb_flush(struct hci_dev *hdev)
{
- struct bfusb_data *data = hdev->driver_data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
BT_DBG("hdev %p bfusb %p", hdev, data);
@@ -448,7 +448,7 @@
static int bfusb_close(struct hci_dev *hdev)
{
- struct bfusb_data *data = hdev->driver_data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
unsigned long flags;
BT_DBG("hdev %p bfusb %p", hdev, data);
@@ -483,7 +483,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- data = hdev->driver_data;
+ data = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -544,15 +544,6 @@
return 0;
}
-static void bfusb_destruct(struct hci_dev *hdev)
-{
- struct bfusb_data *data = hdev->driver_data;
-
- BT_DBG("hdev %p bfusb %p", hdev, data);
-
- kfree(data);
-}
-
static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -568,22 +559,23 @@
BT_INFO("BlueFRITZ! USB loading firmware");
+ buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL);
+ if (!buf) {
+ BT_ERR("Can't allocate memory chunk for firmware");
+ return -ENOMEM;
+ }
+
pipe = usb_sndctrlpipe(data->udev, 0);
if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION,
0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) {
BT_ERR("Can't change to loading configuration");
+ kfree(buf);
return -EBUSY;
}
data->udev->toggle[0] = data->udev->toggle[1] = 0;
- buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC);
- if (!buf) {
- BT_ERR("Can't allocate memory chunk for firmware");
- return -ENOMEM;
- }
-
pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
while (count) {
@@ -661,7 +653,7 @@
}
/* Initialize control structure and load firmware */
- data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
+ data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
if (!data) {
BT_ERR("Can't allocate memory for control structure");
goto done;
@@ -682,7 +674,7 @@
if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
BT_ERR("Firmware request failed");
- goto error;
+ goto done;
}
BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -698,28 +690,25 @@
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can't allocate HCI device");
- goto error;
+ goto done;
}
data->hdev = hdev;
hdev->bus = HCI_USB;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
SET_HCIDEV_DEV(hdev, &intf->dev);
hdev->open = bfusb_open;
hdev->close = bfusb_close;
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
- hdev->destruct = bfusb_destruct;
hdev->ioctl = bfusb_ioctl;
- hdev->owner = THIS_MODULE;
-
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
- goto error;
+ goto done;
}
usb_set_intfdata(intf, data);
@@ -729,9 +718,6 @@
release:
release_firmware(firmware);
-error:
- kfree(data);
-
done:
return -EIO;
}
@@ -750,9 +736,7 @@
bfusb_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
@@ -761,28 +745,10 @@
.probe = bfusb_probe,
.disconnect = bfusb_disconnect,
.id_table = bfusb_table,
+ .disable_hub_initiated_lpm = 1,
};
-static int __init bfusb_init(void)
-{
- int err;
-
- BT_INFO("BlueFRITZ! USB driver ver %s", VERSION);
-
- err = usb_register(&bfusb_driver);
- if (err < 0)
- BT_ERR("Failed to register BlueFRITZ! USB driver");
-
- return err;
-}
-
-static void __exit bfusb_exit(void)
-{
- usb_deregister(&bfusb_driver);
-}
-
-module_init(bfusb_init);
-module_exit(bfusb_exit);
+module_usb_driver(bfusb_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 4104b7f..6c3e3d4 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
- register unsigned int offset;
- register unsigned char command;
- register unsigned long ready_bit;
+ unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int offset;
+ unsigned char command;
+ unsigned long ready_bit;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -561,7 +561,7 @@
static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
struct sk_buff *skb;
/* Ericsson baud rate command */
@@ -609,7 +609,7 @@
static int bluecard_hci_flush(struct hci_dev *hdev)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -620,8 +620,7 @@
static int bluecard_hci_open(struct hci_dev *hdev)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
- unsigned int iobase = info->p_dev->resource[0]->start;
+ bluecard_info_t *info = hci_get_drvdata(hdev);
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -630,6 +629,8 @@
return 0;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
+ unsigned int iobase = info->p_dev->resource[0]->start;
+
/* Enable LED */
outb(0x08 | 0x20, iobase + 0x30);
}
@@ -640,8 +641,7 @@
static int bluecard_hci_close(struct hci_dev *hdev)
{
- bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
- unsigned int iobase = info->p_dev->resource[0]->start;
+ bluecard_info_t *info = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
@@ -649,6 +649,8 @@
bluecard_hci_flush(hdev);
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
+ unsigned int iobase = info->p_dev->resource[0]->start;
+
/* Disable LED */
outb(0x00, iobase + 0x30);
}
@@ -667,7 +669,7 @@
return -ENODEV;
}
- info = (bluecard_info_t *)(hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -679,7 +681,7 @@
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
- };
+ }
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -691,11 +693,6 @@
}
-static void bluecard_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -734,18 +731,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = bluecard_hci_open;
hdev->close = bluecard_hci_close;
hdev->flush = bluecard_hci_flush;
hdev->send = bluecard_hci_send_frame;
- hdev->destruct = bluecard_hci_destruct;
hdev->ioctl = bluecard_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
id = inb(iobase + 0x30);
if ((id & 0x0f) == 0x02)
@@ -844,9 +838,7 @@
/* Turn FPGA off */
outb(0x80, iobase + 0x30);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
@@ -857,7 +849,7 @@
bluecard_info_t *info;
/* Create new info device */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -872,10 +864,7 @@
static void bluecard_detach(struct pcmcia_device *link)
{
- bluecard_info_t *info = link->priv;
-
bluecard_release(link);
- kfree(info);
}
@@ -930,7 +919,7 @@
pcmcia_disable_device(link);
}
-static struct pcmcia_device_id bluecard_ids[] = {
+static const struct pcmcia_device_id bluecard_ids[] = {
PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e),
PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c),
PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab),
@@ -945,17 +934,4 @@
.remove = bluecard_detach,
.id_table = bluecard_ids,
};
-
-static int __init init_bluecard_cs(void)
-{
- return pcmcia_register_driver(&bluecard_driver);
-}
-
-
-static void __exit exit_bluecard_cs(void)
-{
- pcmcia_unregister_driver(&bluecard_driver);
-}
-
-module_init(init_bluecard_cs);
-module_exit(exit_bluecard_cs);
+module_pcmcia_driver(bluecard_driver);
diff --git a/drivers/bluetooth/bluesleep.c b/drivers/bluetooth/bluesleep.c
index 6dc9862..8133b55 100644
--- a/drivers/bluetooth/bluesleep.c
+++ b/drivers/bluetooth/bluesleep.c
@@ -1,30 +1,33 @@
/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2 as
- published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
- Copyright (C) 2006-2007 - Motorola
- Copyright (c) 2008-2010, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2006-2007 - Motorola
+ * Copyright (c) 2008-2010, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, LGE Inc.
- Date Author Comment
- ----------- -------------- --------------------------------
- 2006-Apr-28 Motorola The kernel module for running the Bluetooth(R)
- Sleep-Mode Protocol from the Host side
- 2006-Sep-08 Motorola Added workqueue for handling sleep work.
- 2007-Jan-24 Motorola Added mbm_handle_ioi() call to ISR.
+ * Date Author Comment
+ * ----------- -------------- --------------------------------
+ * 2006-Apr-28 Motorola The kernel module for running the Bluetooth(R)
+ * Sleep-Mode Protocol from the Host side
+ * 2006-Sep-08 Motorola Added workqueue for handling sleep work.
+ * 2007-Jan-24 Motorola Added mbm_handle_ioi() call to ISR.
+ * 2009-Aug-10 Motorola Changed "add_timer" to "mod_timer" to solve
+ * race when flurry of queued work comes in.
+ */
-*/
+#define pr_fmt(fmt) "Bluetooth: %s: " fmt, __func__
#include <linux/module.h> /* kernel module definitions */
#include <linux/errno.h>
@@ -41,11 +44,15 @@
#include <linux/platform_device.h>
#include <linux/irq.h>
+#include <linux/ioport.h>
#include <linux/param.h>
#include <linux/bitops.h>
#include <linux/termios.h>
-#include <mach/gpio.h>
-#include <mach/msm_serial_hs.h>
+#include <linux/wakelock.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/serial_core.h>
+#include <linux/platform_data/msm_serial_hs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h> /* event notifications */
@@ -59,14 +66,37 @@
* Defines
*/
-#define VERSION "1.1"
+#define VERSION "1.2"
#define PROC_DIR "bluetooth/sleep"
+#define POLARITY_LOW 0
+#define POLARITY_HIGH 1
+
+#define BT_PORT_ID 0
+
+/* enable/disable wake-on-bluetooth */
+#define BT_ENABLE_IRQ_WAKE 1
+
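+/* 1: a Bluedroid-style userspace stack drives low-power mode through the
+ * lpm/btwrite proc entries below; 0: hook kernel HCI notifier events instead.
+ */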
+#define BT_BLUEDROID_SUPPORT 1
+
+enum {
+ DEBUG_USER_STATE = 1U << 0,
+ DEBUG_SUSPEND = 1U << 1,
+ DEBUG_BTWAKE = 1U << 2,
+ DEBUG_VERBOSE = 1U << 3,
+};
+
+static int debug_mask = DEBUG_USER_STATE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
struct bluesleep_info {
unsigned host_wake;
unsigned ext_wake;
unsigned host_wake_irq;
struct uart_port *uport;
+ struct wake_lock wake_lock;
+ int irq_polarity;
+ int has_ext_wake;
};
/* work function */
@@ -81,17 +111,33 @@
#define bluesleep_rx_idle() schedule_delayed_work(&sleep_workqueue, 0)
#define bluesleep_tx_idle() schedule_delayed_work(&sleep_workqueue, 0)
-/* 1 second timeout */
-#define TX_TIMER_INTERVAL 1
+/* 5 second timeout */
+#define TX_TIMER_INTERVAL 5
/* state variable names and bit positions */
#define BT_PROTO 0x01
#define BT_TXDATA 0x02
#define BT_ASLEEP 0x04
+#define BT_EXT_WAKE 0x08
+#define BT_SUSPEND 0x10
+#define PROC_BTWAKE 0
+#define PROC_HOSTWAKE 1
+#define PROC_PROTO 2
+#define PROC_ASLEEP 3
+#if BT_BLUEDROID_SUPPORT
+#define PROC_LPM 4
+#define PROC_BTWRITE 5
+#endif
+
+#if BT_BLUEDROID_SUPPORT
+static bool has_lpm_enabled;
+#else
/* global pointer to a single hci device. */
static struct hci_dev *bluesleep_hdev;
+#endif
+static struct platform_device *bluesleep_uart_dev;
static struct bluesleep_info *bsi;
/* module usage */
@@ -100,9 +146,12 @@
/*
* Local function prototypes
*/
-
+#if !BT_BLUEDROID_SUPPORT
static int bluesleep_hci_event(struct notifier_block *this,
- unsigned long event, void *data);
+ unsigned long event, void *data);
+#endif
+static int bluesleep_start(void);
+static void bluesleep_stop(void);
/*
* Global variables
@@ -115,15 +164,18 @@
static struct tasklet_struct hostwake_task;
/** Transmission timer */
-static struct timer_list tx_timer;
+static void bluesleep_tx_timer_expire(unsigned long data);
+static DEFINE_TIMER(tx_timer, bluesleep_tx_timer_expire, 0, 0);
/** Lock for state transitions */
static spinlock_t rw_lock;
+#if !BT_BLUEDROID_SUPPORT
/** Notifier block for HCI events */
struct notifier_block hci_event_nblock = {
.notifier_call = bluesleep_hci_event,
};
+#endif
struct proc_dir_entry *bluetooth_dir, *sleep_dir;
@@ -133,6 +185,8 @@
static void hsuart_power(int on)
{
+ if (test_bit(BT_SUSPEND, &flags))
+ return;
if (on) {
msm_hs_request_clock_on(bsi->uport);
msm_hs_set_mctrl(bsi->uport, TIOCM_RTS);
@@ -142,25 +196,30 @@
}
}
-
/**
* @return 1 if the Host can go to sleep, 0 otherwise.
*/
-static inline int bluesleep_can_sleep(void)
+int bluesleep_can_sleep(void)
{
- /* check if MSM_WAKE_BT_GPIO and BT_WAKE_MSM_GPIO are both deasserted */
- return gpio_get_value(bsi->ext_wake) &&
- gpio_get_value(bsi->host_wake) &&
- (bsi->uport != NULL);
+ /* check if WAKE_BT_GPIO and BT_WAKE_GPIO are both deasserted */
+ return ((gpio_get_value(bsi->host_wake) != bsi->irq_polarity) &&
+ (test_bit(BT_EXT_WAKE, &flags)) &&
+ (bsi->uport != NULL));
}
void bluesleep_sleep_wakeup(void)
{
if (test_bit(BT_ASLEEP, &flags)) {
- BT_DBG("waking up...");
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("waking up...\n");
+ wake_lock(&bsi->wake_lock);
/* Start the timer */
mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
- gpio_set_value(bsi->ext_wake, 0);
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
clear_bit(BT_ASLEEP, &flags);
/*Activating UART */
hsuart_power(1);
@@ -176,20 +235,34 @@
if (bluesleep_can_sleep()) {
/* already asleep, this is an error case */
if (test_bit(BT_ASLEEP, &flags)) {
- BT_DBG("already asleep");
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("already asleep\n");
return;
}
if (msm_hs_tx_empty(bsi->uport)) {
- BT_DBG("going to sleep...");
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("going to sleep...\n");
set_bit(BT_ASLEEP, &flags);
/*Deactivating UART */
hsuart_power(0);
+ /* UART clk is not turned off immediately. Release
+ * wakelock after 500 ms.
+ */
+ wake_lock_timeout(&bsi->wake_lock, HZ / 2);
} else {
mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
return;
}
+ } else if (test_bit(BT_EXT_WAKE, &flags)
+ && !test_bit(BT_ASLEEP, &flags)) {
+ mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
} else {
bluesleep_sleep_wakeup();
}
@@ -202,11 +275,11 @@
*/
static void bluesleep_hostwake_task(unsigned long data)
{
- BT_DBG("hostwake line change");
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("hostwake line change\n");
spin_lock(&rw_lock);
-
- if (gpio_get_value(bsi->host_wake))
+ if ((gpio_get_value(bsi->host_wake) == bsi->irq_polarity))
bluesleep_rx_busy();
else
bluesleep_rx_idle();
@@ -227,16 +300,18 @@
/* log data passing by */
set_bit(BT_TXDATA, &flags);
- /* if the tx side is sleeping... */
- if (gpio_get_value(bsi->ext_wake)) {
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
- BT_DBG("tx was sleeping");
+ /* if the tx side is sleeping... */
+ if (test_bit(BT_EXT_WAKE, &flags)) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("tx was sleeping\n");
bluesleep_sleep_wakeup();
}
-
- spin_unlock_irqrestore(&rw_lock, irq_flags);
}
+
+#if !BT_BLUEDROID_SUPPORT
/**
* Handles HCI device events.
* @param this Not used.
@@ -261,11 +336,15 @@
hu = (struct hci_uart *) hdev->driver_data;
state = (struct uart_state *) hu->tty->driver_data;
bsi->uport = state->uart_port;
+ /* if bluetooth started, start bluesleep*/
+ bluesleep_start();
}
break;
case HCI_DEV_UNREG:
+ bluesleep_stop();
bluesleep_hdev = NULL;
bsi->uport = NULL;
+ /* if bluetooth stopped, stop bluesleep also */
break;
case HCI_DEV_WRITE:
bluesleep_outgoing_data();
@@ -274,6 +353,7 @@
return NOTIFY_DONE;
}
+#endif
/**
* Handles transmission timer expiration.
@@ -283,17 +363,24 @@
{
unsigned long irq_flags;
- spin_lock_irqsave(&rw_lock, irq_flags);
+ if (debug_mask & DEBUG_VERBOSE)
+ pr_info("Tx timer expired\n");
- BT_DBG("Tx timer expired");
+ spin_lock_irqsave(&rw_lock, irq_flags);
/* were we silent during the last timeout? */
if (!test_bit(BT_TXDATA, &flags)) {
- BT_DBG("Tx has been idle");
- gpio_set_value(bsi->ext_wake, 1);
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("Tx has been idle\n");
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to sleep\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
bluesleep_tx_idle();
} else {
- BT_DBG("Tx data during last period");
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("Tx data during last period\n");
mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL*HZ));
}
@@ -345,23 +432,20 @@
mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL*HZ));
/* assert BT_WAKE */
- gpio_set_value(bsi->ext_wake, 0);
- retval = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
- "bluetooth hostwake", NULL);
- if (retval < 0) {
- BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
- goto fail;
- }
-
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
+#if BT_ENABLE_IRQ_WAKE
retval = enable_irq_wake(bsi->host_wake_irq);
if (retval < 0) {
BT_ERR("Couldn't enable BT_HOST_WAKE as wakeup interrupt");
- free_irq(bsi->host_wake_irq, NULL);
goto fail;
}
-
+#endif
set_bit(BT_PROTO, &flags);
+ wake_lock(&bsi->wake_lock);
return 0;
fail:
del_timer(&tx_timer);
@@ -385,222 +469,163 @@
}
/* assert BT_WAKE */
- gpio_set_value(bsi->ext_wake, 0);
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
del_timer(&tx_timer);
clear_bit(BT_PROTO, &flags);
if (test_bit(BT_ASLEEP, &flags)) {
clear_bit(BT_ASLEEP, &flags);
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
hsuart_power(1);
+ } else {
+ spin_unlock_irqrestore(&rw_lock, irq_flags);
}
atomic_inc(&open_count);
- spin_unlock_irqrestore(&rw_lock, irq_flags);
+#if BT_ENABLE_IRQ_WAKE
if (disable_irq_wake(bsi->host_wake_irq))
- BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
- free_irq(bsi->host_wake_irq, NULL);
-}
-/**
- * Read the <code>BT_WAKE</code> GPIO pin value via the proc interface.
- * When this function returns, <code>page</code> will contain a 1 if the
- * pin is high, 0 otherwise.
- * @param page Buffer for writing data.
- * @param start Not used.
- * @param offset Not used.
- * @param count Not used.
- * @param eof Whether or not there is more data to be read.
- * @param data Not used.
- * @return The number of bytes written.
- */
-static int bluepower_read_proc_btwake(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- *eof = 1;
- return sprintf(page, "btwake:%u\n", gpio_get_value(bsi->ext_wake));
+ BT_ERR("Couldn't disable hostwake IRQ wakeup mode");
+#endif
+ wake_lock_timeout(&bsi->wake_lock, HZ / 2);
}
-/**
- * Write the <code>BT_WAKE</code> GPIO pin value via the proc interface.
- * @param file Not used.
- * @param buffer The buffer to read from.
- * @param count The number of bytes to be written.
- * @param data Not used.
- * @return On success, the number of bytes written. On error, -1, and
- * <code>errno</code> is set appropriately.
- */
-static int bluepower_write_proc_btwake(struct file *file, const char *buffer,
- unsigned long count, void *data)
+void bluesleep_setup_uart_port(struct platform_device *uart_dev)
{
- char *buf;
+ bluesleep_uart_dev = uart_dev;
+}
- if (count < 1)
- return -EINVAL;
+static int bluesleep_populate_dt_pinfo(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int tmp;
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count)) {
- kfree(buf);
- return -EFAULT;
+ tmp = of_get_named_gpio(np, "bt_host_wake", 0);
+ if (tmp < 0) {
+ BT_ERR("couldn't find host_wake gpio");
+ return -ENODEV;
}
+ bsi->host_wake = tmp;
- if (buf[0] == '0') {
- gpio_set_value(bsi->ext_wake, 0);
- } else if (buf[0] == '1') {
- gpio_set_value(bsi->ext_wake, 1);
- } else {
- kfree(buf);
- return -EINVAL;
- }
-
- kfree(buf);
- return count;
-}
-
-/**
- * Read the <code>BT_HOST_WAKE</code> GPIO pin value via the proc interface.
- * When this function returns, <code>page</code> will contain a 1 if the pin
- * is high, 0 otherwise.
- * @param page Buffer for writing data.
- * @param start Not used.
- * @param offset Not used.
- * @param count Not used.
- * @param eof Whether or not there is more data to be read.
- * @param data Not used.
- * @return The number of bytes written.
- */
-static int bluepower_read_proc_hostwake(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- *eof = 1;
- return sprintf(page, "hostwake: %u \n", gpio_get_value(bsi->host_wake));
-}
-
-
-/**
- * Read the low-power status of the Host via the proc interface.
- * When this function returns, <code>page</code> contains a 1 if the Host
- * is asleep, 0 otherwise.
- * @param page Buffer for writing data.
- * @param start Not used.
- * @param offset Not used.
- * @param count Not used.
- * @param eof Whether or not there is more data to be read.
- * @param data Not used.
- * @return The number of bytes written.
- */
-static int bluesleep_read_proc_asleep(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- unsigned int asleep;
-
- asleep = test_bit(BT_ASLEEP, &flags) ? 1 : 0;
- *eof = 1;
- return sprintf(page, "asleep: %u\n", asleep);
-}
-
-/**
- * Read the low-power protocol being used by the Host via the proc interface.
- * When this function returns, <code>page</code> will contain a 1 if the Host
- * is using the Sleep Mode Protocol, 0 otherwise.
- * @param page Buffer for writing data.
- * @param start Not used.
- * @param offset Not used.
- * @param count Not used.
- * @param eof Whether or not there is more data to be read.
- * @param data Not used.
- * @return The number of bytes written.
- */
-static int bluesleep_read_proc_proto(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- unsigned int proto;
-
- proto = test_bit(BT_PROTO, &flags) ? 1 : 0;
- *eof = 1;
- return sprintf(page, "proto: %u\n", proto);
-}
-
-/**
- * Modify the low-power protocol used by the Host via the proc interface.
- * @param file Not used.
- * @param buffer The buffer to read from.
- * @param count The number of bytes to be written.
- * @param data Not used.
- * @return On success, the number of bytes written. On error, -1, and
- * <code>errno</code> is set appropriately.
- */
-static int bluesleep_write_proc_proto(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char proto;
-
- if (count < 1)
- return -EINVAL;
-
- if (copy_from_user(&proto, buffer, 1))
- return -EFAULT;
-
- if (proto == '0')
- bluesleep_stop();
+ tmp = of_get_named_gpio(np, "bt_ext_wake", 0);
+ if (tmp < 0)
+ bsi->has_ext_wake = 0;
else
- bluesleep_start();
+ bsi->has_ext_wake = 1;
- /* claim that we wrote everything */
- return count;
+ if (bsi->has_ext_wake)
+ bsi->ext_wake = tmp;
+
+ BT_INFO("bt_host_wake %d, bt_ext_wake %d",
+ bsi->host_wake,
+ bsi->ext_wake);
+ return 0;
}
-static int __init bluesleep_probe(struct platform_device *pdev)
+static int bluesleep_populate_pinfo(struct platform_device *pdev)
{
- int ret;
struct resource *res;
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_host_wake");
+ if (!res) {
+ BT_ERR("couldn't find host_wake gpio");
+ return -ENODEV;
+ }
+ bsi->host_wake = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_ext_wake");
+ if (!res)
+ bsi->has_ext_wake = 0;
+ else
+ bsi->has_ext_wake = 1;
+
+ if (bsi->has_ext_wake)
+ bsi->ext_wake = res->start;
+
+ return 0;
+}
+
+static int bluesleep_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
bsi = kzalloc(sizeof(struct bluesleep_info), GFP_KERNEL);
if (!bsi)
return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_IO,
- "gpio_host_wake");
- if (!res) {
- BT_ERR("couldn't find host_wake gpio\n");
- ret = -ENODEV;
+ if (pdev->dev.of_node) {
+ ret = bluesleep_populate_dt_pinfo(pdev);
+ if (ret < 0) {
+ BT_ERR("couldn't populate info from dt");
+ return ret;
+ }
+ } else {
+ ret = bluesleep_populate_pinfo(pdev);
+ if (ret < 0) {
+ BT_ERR("couldn't populate info");
+ return ret;
+ }
+ }
+
+ /* configure host_wake as input */
+ ret = gpio_request_one(bsi->host_wake, GPIOF_IN, "bt_host_wake");
+ if (ret < 0) {
+ BT_ERR("failed to configure input direction for GPIO %d err %d",
+ bsi->host_wake, ret);
goto free_bsi;
}
- bsi->host_wake = res->start;
- ret = gpio_request(bsi->host_wake, "bt_host_wake");
- if (ret)
- goto free_bsi;
- ret = gpio_direction_input(bsi->host_wake);
- if (ret)
- goto free_bt_host_wake;
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake) {
+ /* configure ext_wake as output mode*/
+ ret = gpio_request_one(bsi->ext_wake,
+ GPIOF_OUT_INIT_LOW, "bt_ext_wake");
+ if (ret < 0) {
+ BT_ERR("failed to configure output direction for GPIO %d err %d",
+ bsi->ext_wake, ret);
+ goto free_bt_host_wake;
+ }
+ }
+ clear_bit(BT_EXT_WAKE, &flags);
- res = platform_get_resource_byname(pdev, IORESOURCE_IO,
- "gpio_ext_wake");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "host_wake");
if (!res) {
- BT_ERR("couldn't find ext_wake gpio\n");
+ BT_ERR("couldn't find host_wake irq");
ret = -ENODEV;
goto free_bt_host_wake;
}
- bsi->ext_wake = res->start;
-
- ret = gpio_request(bsi->ext_wake, "bt_ext_wake");
- if (ret)
- goto free_bt_host_wake;
- /* assert bt wake */
- ret = gpio_direction_output(bsi->ext_wake, 0);
- if (ret)
- goto free_bt_ext_wake;
-
- bsi->host_wake_irq = platform_get_irq_byname(pdev, "host_wake");
+ bsi->host_wake_irq = res->start;
if (bsi->host_wake_irq < 0) {
- BT_ERR("couldn't find host_wake irq\n");
+ BT_ERR("couldn't find host_wake irq");
ret = -ENODEV;
goto free_bt_ext_wake;
}
+ bsi->irq_polarity = POLARITY_LOW; /* low edge (falling edge) */
+
+ wake_lock_init(&bsi->wake_lock, WAKE_LOCK_SUSPEND, "bluesleep");
+ clear_bit(BT_SUSPEND, &flags);
+
+ BT_INFO("host_wake_irq %d, polarity %d",
+ bsi->host_wake_irq,
+ bsi->irq_polarity);
+
+ ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ "bluetooth hostwake", NULL);
+ if (ret < 0) {
+ BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
+ goto free_bt_ext_wake;
+ }
return 0;
@@ -615,30 +640,160 @@
static int bluesleep_remove(struct platform_device *pdev)
{
- /* assert bt wake */
- gpio_set_value(bsi->ext_wake, 0);
- if (test_bit(BT_PROTO, &flags)) {
- if (disable_irq_wake(bsi->host_wake_irq))
- BT_ERR("Couldn't disable hostwake IRQ wakeup mode \n");
- free_irq(bsi->host_wake_irq, NULL);
- del_timer(&tx_timer);
- if (test_bit(BT_ASLEEP, &flags))
- hsuart_power(1);
- }
-
+ free_irq(bsi->host_wake_irq, NULL);
gpio_free(bsi->host_wake);
gpio_free(bsi->ext_wake);
+ wake_lock_destroy(&bsi->wake_lock);
kfree(bsi);
return 0;
}
+
+static int bluesleep_resume(struct platform_device *pdev)
+{
+ if (test_bit(BT_SUSPEND, &flags)) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("bluesleep resuming...\n");
+ if ((bsi->uport != NULL) &&
+ (gpio_get_value(bsi->host_wake) == bsi->irq_polarity)) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("bluesleep resume from BT event...\n");
+ msm_hs_request_clock_on(bsi->uport);
+ msm_hs_set_mctrl(bsi->uport, TIOCM_RTS);
+ }
+ clear_bit(BT_SUSPEND, &flags);
+ }
+ return 0;
+}
+
+static int bluesleep_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("bluesleep suspending...\n");
+ set_bit(BT_SUSPEND, &flags);
+ return 0;
+}
+
+static struct of_device_id bluesleep_match_table[] = {
+ { .compatible = "qcom,bluesleep" },
+ {}
+};
+
static struct platform_driver bluesleep_driver = {
+ .probe = bluesleep_probe,
.remove = bluesleep_remove,
+ .suspend = bluesleep_suspend,
+ .resume = bluesleep_resume,
.driver = {
.name = "bluesleep",
.owner = THIS_MODULE,
+ .of_match_table = bluesleep_match_table,
},
};
+
+static int bluesleep_proc_show(struct seq_file *m, void *v)
+{
+ switch ((long)m->private) {
+ case PROC_BTWAKE:
+ seq_printf(m, "btwake:%u\n", test_bit(BT_EXT_WAKE, &flags));
+ break;
+ case PROC_HOSTWAKE:
+ seq_printf(m, "hostwake: %u\n", gpio_get_value(bsi->host_wake));
+ break;
+ case PROC_PROTO:
+ seq_printf(m, "proto: %u\n",
+ test_bit(BT_PROTO, &flags) ? 1 : 0);
+ break;
+ case PROC_ASLEEP:
+ seq_printf(m, "asleep: %u\n",
+ test_bit(BT_ASLEEP, &flags) ? 1 : 0);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static ssize_t bluesleep_proc_write(struct file *file, const char *buf,
+ size_t count, loff_t *pos)
+{
+ void *data = PDE_DATA(file_inode(file));
+ char lbuf[32];
+
+ if (count >= sizeof(lbuf))
+ count = sizeof(lbuf)-1;
+
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+ lbuf[count] = 0;
+
+ switch ((long)data) {
+ case PROC_BTWAKE:
+ if (lbuf[0] == '0') {
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
+ } else if (lbuf[0] == '1') {
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to sleep\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 1);
+ set_bit(BT_EXT_WAKE, &flags);
+ }
+ break;
+ case PROC_PROTO:
+ if (lbuf[0] == '0')
+ bluesleep_stop();
+ else
+ bluesleep_start();
+ break;
+ case PROC_LPM:
+ if (lbuf[0] == '0') {
+ /* HCI_DEV_UNREG */
+ bluesleep_stop();
+ has_lpm_enabled = false;
+ bsi->uport = NULL;
+ } else {
+ /* HCI_DEV_REG */
+ if (!has_lpm_enabled) {
+ has_lpm_enabled = true;
+ bsi->uport = msm_hs_get_uart_port(BT_PORT_ID);
+ /* if bluetooth started, start bluesleep*/
+ bluesleep_start();
+ }
+ }
+ break;
+ case PROC_BTWRITE:
+ /* HCI_DEV_WRITE */
+ if (lbuf[0] != '0')
+ bluesleep_outgoing_data();
+ break;
+ default:
+ return 0;
+ }
+
+ return count;
+}
+
+static int bluesleep_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bluesleep_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations bluesleep_proc_readwrite_fops = {
+ .owner = THIS_MODULE,
+ .open = bluesleep_proc_open,
+ .read = seq_read,
+ .write = bluesleep_proc_write,
+};
+static const struct file_operations bluesleep_proc_read_fops = {
+ .owner = THIS_MODULE,
+ .open = bluesleep_proc_open,
+ .read = seq_read,
+};
+
/**
* Initializes the module.
* @return On success, 0. On error, -1, and <code>errno</code> is set
@@ -649,13 +804,18 @@
int retval;
struct proc_dir_entry *ent;
- BT_INFO("MSM Sleep Mode Driver Ver %s", VERSION);
+ BT_INFO("BlueSleep Mode Driver Ver %s", VERSION);
- retval = platform_driver_probe(&bluesleep_driver, bluesleep_probe);
+ retval = platform_driver_register(&bluesleep_driver);
if (retval)
return retval;
+ if (bsi == NULL)
+ return 0;
+
+#if !BT_BLUEDROID_SUPPORT
bluesleep_hdev = NULL;
+#endif
bluetooth_dir = proc_mkdir("bluetooth", NULL);
if (bluetooth_dir == NULL) {
@@ -670,41 +830,68 @@
}
/* Creating read/write "btwake" entry */
- ent = create_proc_entry("btwake", 0, sleep_dir);
+ ent = proc_create_data("btwake", S_IRUGO | S_IWUSR | S_IWGRP,
+ sleep_dir, &bluesleep_proc_readwrite_fops,
+ (void *)PROC_BTWAKE);
if (ent == NULL) {
BT_ERR("Unable to create /proc/%s/btwake entry", PROC_DIR);
retval = -ENOMEM;
goto fail;
}
- ent->read_proc = bluepower_read_proc_btwake;
- ent->write_proc = bluepower_write_proc_btwake;
/* read only proc entries */
- if (create_proc_read_entry("hostwake", 0, sleep_dir,
- bluepower_read_proc_hostwake, NULL) == NULL) {
+ ent = proc_create_data("hostwake", S_IRUGO, sleep_dir,
+ &bluesleep_proc_read_fops,
+ (void *)PROC_HOSTWAKE);
+ if (ent == NULL) {
BT_ERR("Unable to create /proc/%s/hostwake entry", PROC_DIR);
retval = -ENOMEM;
goto fail;
}
/* read/write proc entries */
- ent = create_proc_entry("proto", 0, sleep_dir);
+ ent = proc_create_data("proto", S_IRUGO | S_IWUSR | S_IWGRP,
+ sleep_dir, &bluesleep_proc_readwrite_fops,
+ (void *)PROC_PROTO);
if (ent == NULL) {
BT_ERR("Unable to create /proc/%s/proto entry", PROC_DIR);
retval = -ENOMEM;
goto fail;
}
- ent->read_proc = bluesleep_read_proc_proto;
- ent->write_proc = bluesleep_write_proc_proto;
/* read only proc entries */
- if (create_proc_read_entry("asleep", 0,
- sleep_dir, bluesleep_read_proc_asleep, NULL) == NULL) {
+ ent = proc_create_data("asleep", S_IRUGO,
+ sleep_dir, &bluesleep_proc_read_fops,
+ (void *)PROC_ASLEEP);
+ if (ent == NULL) {
BT_ERR("Unable to create /proc/%s/asleep entry", PROC_DIR);
retval = -ENOMEM;
goto fail;
}
+#if BT_BLUEDROID_SUPPORT
+ /* read/write proc entries */
+ ent = proc_create_data("lpm", S_IRUGO | S_IWUSR | S_IWGRP,
+ sleep_dir, &bluesleep_proc_readwrite_fops,
+ (void *)PROC_LPM);
+ if (ent == NULL) {
+ BT_ERR("Unable to create /proc/%s/lpm entry", PROC_DIR);
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ /* read/write proc entries */
+ ent = proc_create_data("btwrite", S_IRUGO | S_IWUSR | S_IWGRP,
+ sleep_dir, &bluesleep_proc_readwrite_fops,
+ (void *)PROC_BTWRITE);
+ if (ent == NULL) {
+ BT_ERR("Unable to create /proc/%s/btwrite entry", PROC_DIR);
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+#endif
+
flags = 0; /* clear all status bits */
/* Initialize spinlock. */
@@ -718,11 +905,23 @@
/* initialize host wake tasklet */
tasklet_init(&hostwake_task, bluesleep_hostwake_task, 0);
+ /* assert bt wake */
+ if (debug_mask & DEBUG_BTWAKE)
+ pr_info("BT WAKE: set to wake\n");
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
+#if !BT_BLUEDROID_SUPPORT
hci_register_notifier(&hci_event_nblock);
+#endif
return 0;
fail:
+#if BT_BLUEDROID_SUPPORT
+ remove_proc_entry("btwrite", sleep_dir);
+ remove_proc_entry("lpm", sleep_dir);
+#endif
remove_proc_entry("asleep", sleep_dir);
remove_proc_entry("proto", sleep_dir);
remove_proc_entry("hostwake", sleep_dir);
@@ -737,9 +936,31 @@
*/
static void __exit bluesleep_exit(void)
{
+ if (bsi == NULL)
+ return;
+
+ /* assert bt wake */
+ if (bsi->has_ext_wake == 1)
+ gpio_set_value(bsi->ext_wake, 0);
+ clear_bit(BT_EXT_WAKE, &flags);
+ if (test_bit(BT_PROTO, &flags)) {
+ if (disable_irq_wake(bsi->host_wake_irq))
+ BT_ERR("Couldn't disable hostwake IRQ wakeup mode");
+ free_irq(bsi->host_wake_irq, NULL);
+ del_timer(&tx_timer);
+ if (test_bit(BT_ASLEEP, &flags))
+ hsuart_power(1);
+ }
+
+#if !BT_BLUEDROID_SUPPORT
hci_unregister_notifier(&hci_event_nblock);
+#endif
platform_driver_unregister(&bluesleep_driver);
+#if BT_BLUEDROID_SUPPORT
+ remove_proc_entry("btwrite", sleep_dir);
+ remove_proc_entry("lpm", sleep_dir);
+#endif
remove_proc_entry("asleep", sleep_dir);
remove_proc_entry("proto", sleep_dir);
remove_proc_entry("hostwake", sleep_dir);
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 718df02..0511422 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2010, 2013-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,15 +20,239 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/bluetooth-power.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+#define BT_PWR_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_PWR_INFO(fmt, arg...) pr_info("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_PWR_ERR(fmt, arg...) pr_err("%s: " fmt "\n" , __func__ , ## arg)
+
+
+static struct of_device_id bt_power_match_table[] = {
+ { .compatible = "qca,ar3002" },
+ { .compatible = "qca,qca6174" },
+ {}
+};
+
+static struct bluetooth_power_platform_data *bt_power_pdata;
+static struct platform_device *btpdev;
static bool previous;
+static int bt_vreg_init(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+ struct device *dev = &btpdev->dev;
+
+ BT_PWR_DBG("vreg_get for : %s", vreg->name);
+
+ /* Get the regulator handle */
+ vreg->reg = regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ rc = PTR_ERR(vreg->reg);
+ pr_err("%s: regulator_get(%s) failed. rc=%d\n",
+ __func__, vreg->name, rc);
+ goto out;
+ }
+
+ if ((regulator_count_voltages(vreg->reg) > 0)
+ && (vreg->low_vol_level) && (vreg->high_vol_level))
+ vreg->set_voltage_sup = 1;
+
+out:
+ return rc;
+}
+
+static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+
+ BT_PWR_DBG("vreg_en for : %s", vreg->name);
+
+ if (!vreg->is_enabled) {
+ if (vreg->set_voltage_sup) {
+ rc = regulator_set_voltage(vreg->reg,
+ vreg->low_vol_level,
+ vreg->high_vol_level);
+ if (rc < 0) {
+ BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+ vreg->name, rc);
+ goto out;
+ }
+ }
+
+ rc = regulator_enable(vreg->reg);
+ if (rc < 0) {
+ BT_PWR_ERR("regulator_enable(%s) failed. rc=%d\n",
+ vreg->name, rc);
+ goto out;
+ }
+ vreg->is_enabled = true;
+ }
+out:
+ return rc;
+}
+
+static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+
+ if (!vreg)
+ return rc;
+
+ BT_PWR_DBG("vreg_disable for : %s", vreg->name);
+
+ if (vreg->is_enabled) {
+ rc = regulator_disable(vreg->reg);
+ if (rc < 0) {
+ BT_PWR_ERR("regulator_disable(%s) failed. rc=%d\n",
+ vreg->name, rc);
+ goto out;
+ }
+ vreg->is_enabled = false;
+
+ if (vreg->set_voltage_sup) {
+ /* Set the min voltage to 0 */
+ rc = regulator_set_voltage(vreg->reg,
+ 0,
+ vreg->high_vol_level);
+ if (rc < 0) {
+ BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+ vreg->name, rc);
+ goto out;
+
+ }
+ }
+ }
+out:
+ return rc;
+}
+
+static int bt_configure_vreg(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+
+ BT_PWR_DBG("config %s", vreg->name);
+
+ /* Get the regulator handle for vreg */
+ if (!(vreg->reg)) {
+ rc = bt_vreg_init(vreg);
+ if (rc < 0)
+ return rc;
+ }
+ rc = bt_vreg_enable(vreg);
+
+ return rc;
+}
+
+static int bt_configure_gpios(int on)
+{
+ int rc = 0;
+ int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst;
+
+ BT_PWR_DBG("%s bt_gpio= %d on: %d", __func__, bt_reset_gpio, on);
+
+ if (on) {
+ rc = gpio_request(bt_reset_gpio, "bt_sys_rst_n");
+ if (rc) {
+ BT_PWR_ERR("unable to request gpio %d (%d)\n",
+ bt_reset_gpio, rc);
+ return rc;
+ }
+
+ rc = gpio_direction_output(bt_reset_gpio, 0);
+ if (rc) {
+ BT_PWR_ERR("Unable to set direction\n");
+ return rc;
+ }
+ msleep(50);
+ rc = gpio_direction_output(bt_reset_gpio, 1);
+ if (rc) {
+ BT_PWR_ERR("Unable to set direction\n");
+ return rc;
+ }
+ msleep(50);
+ } else {
+ gpio_set_value(bt_reset_gpio, 0);
+ msleep(100);
+ }
+ return rc;
+}
+
+static int bluetooth_power(int on)
+{
+ int rc = 0;
+
+ BT_PWR_DBG("on: %d", on);
+
+ if (on) {
+ if (bt_power_pdata->bt_vdd_io) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_vdd_io);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddio config failed");
+ goto out;
+ }
+ }
+ if (bt_power_pdata->bt_vdd_pa) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_vdd_pa);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddpa config failed");
+ goto vdd_pa_fail;
+ }
+ }
+ if (bt_power_pdata->bt_vdd_ldo) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_vdd_ldo);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddldo config failed");
+ goto vdd_ldo_fail;
+ }
+ }
+ if (bt_power_pdata->bt_chip_pwd) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddldo config failed");
+ goto chip_pwd_fail;
+ }
+ }
+ if (bt_power_pdata->bt_gpio_sys_rst) {
+ rc = bt_configure_gpios(on);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power gpio config failed");
+ goto gpio_fail;
+ }
+ }
+ } else {
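+ /* Power-off path: fall through each undo step below; the labels also
+ * serve as error-unwind points for the power-on path above. */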
+ bt_configure_gpios(on);
+gpio_fail:
+ if (bt_power_pdata->bt_gpio_sys_rst)
+ gpio_free(bt_power_pdata->bt_gpio_sys_rst);
+ bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
+chip_pwd_fail:
+ bt_vreg_disable(bt_power_pdata->bt_vdd_pa);
+vdd_pa_fail:
+ bt_vreg_disable(bt_power_pdata->bt_vdd_ldo);
+vdd_ldo_fail:
+ bt_vreg_disable(bt_power_pdata->bt_vdd_io);
+ }
+
+out:
+ return rc;
+}
+
static int bluetooth_toggle_radio(void *data, bool blocked)
{
int ret = 0;
int (*power_control)(int enable);
- power_control = data;
+ power_control =
+ ((struct bluetooth_power_platform_data *)data)->bt_power_setup;
+
if (previous != blocked)
ret = (*power_control)(!blocked);
if (!ret)
@@ -40,6 +264,33 @@
.set_block = bluetooth_toggle_radio,
};
+#ifdef CONFIG_CNSS
+static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ bool enable = false;
+ struct cnss_platform_cap cap;
+
+ ret = cnss_get_platform_cap(&cap);
+ if (ret) {
+ BT_PWR_ERR("Platform capability info from CNSS not available!");
+ enable = false;
+ } else if (!ret && (cap.cap_flag & CNSS_HAS_EXTERNAL_SWREG)) {
+ enable = true;
+ }
+ return snprintf(buf, 6, "%s", (enable ? "true" : "false"));
+}
+#else
+static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 6, "%s", "false");
+}
+#endif
+
+static DEVICE_ATTR(extldo, S_IRUGO, enable_extldo, NULL);
+
static int bluetooth_power_rfkill_probe(struct platform_device *pdev)
{
struct rfkill *rfkill;
@@ -54,6 +305,11 @@
return -ENOMEM;
}
+ /* add file into rfkill0 to handle LDO27 */
+ ret = device_create_file(&pdev->dev, &dev_attr_extldo);
+ if (ret < 0)
+ BT_PWR_ERR("device create file error!");
+
/* force Bluetooth off during init to allow for user control */
rfkill_init_sw_state(rfkill, 1);
previous = 1;
@@ -83,37 +339,170 @@
platform_set_drvdata(pdev, NULL);
}
-static int __devinit bt_power_probe(struct platform_device *pdev)
+#define MAX_PROP_SIZE 32
+static int bt_dt_parse_vreg_info(struct device *dev,
+ struct bt_power_vreg_data **vreg_data, const char *vreg_name)
+{
+ int len, ret = 0;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ struct bt_power_vreg_data *vreg;
+ struct device_node *np = dev->of_node;
+
+ BT_PWR_DBG("vreg dev tree parse for %s", vreg_name);
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+ if (of_parse_phandle(np, prop_name, 0)) {
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ vreg->name = vreg_name;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "%s-voltage-level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->low_vol_level = be32_to_cpup(&prop[0]);
+ vreg->high_vol_level = be32_to_cpup(&prop[1]);
+ }
+
+ *vreg_data = vreg;
+ BT_PWR_DBG("%s: vol=[%d %d]uV\n",
+ vreg->name, vreg->low_vol_level,
+ vreg->high_vol_level);
+ } else
+ BT_PWR_INFO("%s: is not provided in device tree", vreg_name);
+
+err:
+ return ret;
+}
+
+static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
+{
+ int rc;
+
+ BT_PWR_DBG("");
+
+ if (!bt_power_pdata)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ bt_power_pdata->bt_gpio_sys_rst =
+ of_get_named_gpio(pdev->dev.of_node,
+ "qca,bt-reset-gpio", 0);
+ if (bt_power_pdata->bt_gpio_sys_rst < 0) {
+ BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+ return bt_power_pdata->bt_gpio_sys_rst;
+ }
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_vdd_io,
+ "qca,bt-vdd-io");
+ if (rc < 0)
+ return rc;
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_vdd_pa,
+ "qca,bt-vdd-pa");
+ if (rc < 0)
+ return rc;
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_vdd_ldo,
+ "qca,bt-vdd-ldo");
+ if (rc < 0)
+ return rc;
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_chip_pwd,
+ "qca,bt-chip-pwd");
+ if (rc < 0)
+ return rc;
+
+ }
+
+ bt_power_pdata->bt_power_setup = bluetooth_power;
+
+ return 0;
+}
+
+static int bt_power_probe(struct platform_device *pdev)
{
int ret = 0;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (!pdev->dev.platform_data) {
- dev_err(&pdev->dev, "platform data not initialized\n");
- return -ENOSYS;
+ bt_power_pdata =
+ kzalloc(sizeof(struct bluetooth_power_platform_data),
+ GFP_KERNEL);
+
+ if (!bt_power_pdata) {
+ BT_PWR_ERR("Failed to allocate memory");
+ return -ENOMEM;
}
- ret = bluetooth_power_rfkill_probe(pdev);
+ if (pdev->dev.of_node) {
+ ret = bt_power_populate_dt_pinfo(pdev);
+ if (ret < 0) {
+ BT_PWR_ERR("Failed to populate device tree info");
+ goto free_pdata;
+ }
+ pdev->dev.platform_data = bt_power_pdata;
+ } else if (pdev->dev.platform_data) {
+		/* Set optional fields to defaults if not provided */
+ if (!((struct bluetooth_power_platform_data *)
+ (pdev->dev.platform_data))->bt_power_setup)
+ ((struct bluetooth_power_platform_data *)
+ (pdev->dev.platform_data))->bt_power_setup =
+ bluetooth_power;
+ memcpy(bt_power_pdata, pdev->dev.platform_data,
+ sizeof(struct bluetooth_power_platform_data));
+	} else {
+		BT_PWR_ERR("Failed to get platform data");
+		ret = -ENODEV;
+		goto free_pdata;
+	}
+
+	ret = bluetooth_power_rfkill_probe(pdev);
+	if (ret < 0)
+		goto free_pdata;
+
+ btpdev = pdev;
+
+ return 0;
+
+free_pdata:
+ kfree(bt_power_pdata);
return ret;
}
-static int __devexit bt_power_remove(struct platform_device *pdev)
+static int bt_power_remove(struct platform_device *pdev)
{
dev_dbg(&pdev->dev, "%s\n", __func__);
bluetooth_power_rfkill_remove(pdev);
+	if (bt_power_pdata->bt_chip_pwd && bt_power_pdata->bt_chip_pwd->reg)
+		regulator_put(bt_power_pdata->bt_chip_pwd->reg);
+
+ kfree(bt_power_pdata);
+
return 0;
}
static struct platform_driver bt_power_driver = {
.probe = bt_power_probe,
- .remove = __devexit_p(bt_power_remove),
+ .remove = bt_power_remove,
.driver = {
.name = "bt_power",
.owner = THIS_MODULE,
+ .of_match_table = bt_power_match_table,
},
};
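
For reference, bt_dt_parse_vreg_info() above treats each "<supply>-voltage-level" property as two big-endian cells holding the low and high voltage in microvolts. Below is a minimal sketch of just that decoding step, outside the patch; the helper name is hypothetical.

	#include <linux/kernel.h>
	#include <linux/of.h>

	/* Illustrative only: decode a two-cell "<supply>-voltage-level"
	 * property into low/high microvolt values, the same way
	 * bt_dt_parse_vreg_info() does. */
	static int decode_voltage_level(struct device_node *np,
					const char *prop_name,
					u32 *low_uv, u32 *high_uv)
	{
		int len;
		const __be32 *prop = of_get_property(np, prop_name, &len);

		/* Expect exactly two 32-bit big-endian cells: <low high> */
		if (!prop || len != 2 * sizeof(__be32))
			return -EINVAL;

		*low_uv = be32_to_cpup(&prop[0]);
		*high_uv = be32_to_cpup(&prop[1]);
		return 0;
	}
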
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 751b338..2fe4a80 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -66,7 +66,7 @@
static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s queue %d buffer %p count %d", hdev->name,
queue, buf, count);
@@ -189,7 +189,7 @@
static void bpa10x_rx_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -219,7 +219,7 @@
static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -260,7 +260,7 @@
static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -301,7 +301,7 @@
static int bpa10x_open(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -329,7 +329,7 @@
static int bpa10x_close(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -343,7 +343,7 @@
static int bpa10x_flush(struct hci_dev *hdev)
{
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -355,7 +355,7 @@
static int bpa10x_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct bpa10x_data *data = hdev->driver_data;
+ struct bpa10x_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
unsigned int pipe;
@@ -432,17 +432,6 @@
return 0;
}
-static void bpa10x_destruct(struct hci_dev *hdev)
-{
- struct bpa10x_data *data = hdev->driver_data;
-
- BT_DBG("%s", hdev->name);
-
- kfree_skb(data->rx_skb[0]);
- kfree_skb(data->rx_skb[1]);
- kfree(data);
-}
-
static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct bpa10x_data *data;
@@ -454,7 +443,7 @@
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -464,13 +453,11 @@
init_usb_anchor(&data->rx_anchor);
hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
+ if (!hdev)
return -ENOMEM;
- }
hdev->bus = HCI_USB;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
data->hdev = hdev;
@@ -480,16 +467,12 @@
hdev->close = bpa10x_close;
hdev->flush = bpa10x_flush;
hdev->send = bpa10x_send_frame;
- hdev->destruct = bpa10x_destruct;
- hdev->owner = THIS_MODULE;
-
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
err = hci_register_dev(hdev);
if (err < 0) {
hci_free_dev(hdev);
- kfree(data);
return err;
}
@@ -512,6 +495,8 @@
hci_unregister_dev(data->hdev);
hci_free_dev(data->hdev);
+ kfree_skb(data->rx_skb[0]);
+ kfree_skb(data->rx_skb[1]);
}
static struct usb_driver bpa10x_driver = {
@@ -519,22 +504,10 @@
.probe = bpa10x_probe,
.disconnect = bpa10x_disconnect,
.id_table = bpa10x_table,
+ .disable_hub_initiated_lpm = 1,
};
-static int __init bpa10x_init(void)
-{
- BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION);
-
- return usb_register(&bpa10x_driver);
-}
-
-static void __exit bpa10x_exit(void)
-{
- usb_deregister(&bpa10x_driver);
-}
-
-module_init(bpa10x_init);
-module_exit(bpa10x_exit);
+module_usb_driver(bpa10x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION);
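
The bpa10x conversion follows the same pattern applied throughout this patch: the private structure is attached with hci_set_drvdata() when the hci_dev is set up and fetched with hci_get_drvdata() in every callback, instead of touching the removed hdev->driver_data field. A minimal sketch of that pattern, with placeholder names (my_data, my_open, my_setup) that are not part of the patch:

	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	struct my_data {
		struct hci_dev *hdev;
		/* driver-private state ... */
	};

	static int my_open(struct hci_dev *hdev)
	{
		/* Fetch what setup stored; replaces "hdev->driver_data". */
		struct my_data *data = hci_get_drvdata(hdev);

		BT_DBG("%s", hdev->name);
		return data ? 0 : -ENODEV;
	}

	static int my_setup(struct my_data *data)
	{
		struct hci_dev *hdev = hci_alloc_dev();

		if (!hdev)
			return -ENOMEM;

		hdev->bus = HCI_USB;
		hci_set_drvdata(hdev, data); /* was hdev->driver_data = data */
		data->hdev = hdev;
		hdev->open = my_open;

		return 0;
	}
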
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 0c8a655..a1aaa3b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -39,7 +39,6 @@
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <linux/device.h>
@@ -187,9 +186,9 @@
return;
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
if (!pcmcia_dev_present(info->p_dev))
break;
@@ -389,7 +388,7 @@
static int bt3c_hci_flush(struct hci_dev *hdev)
{
- bt3c_info_t *info = (bt3c_info_t *)(hdev->driver_data);
+ bt3c_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -428,7 +427,7 @@
return -ENODEV;
}
- info = (bt3c_info_t *) (hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -456,11 +455,6 @@
}
-static void bt3c_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -580,18 +574,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = bt3c_hci_open;
hdev->close = bt3c_hci_close;
hdev->flush = bt3c_hci_flush;
hdev->send = bt3c_hci_send_frame;
- hdev->destruct = bt3c_hci_destruct;
hdev->ioctl = bt3c_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
/* Load firmware */
err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
if (err < 0) {
@@ -636,9 +627,7 @@
bt3c_hci_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
@@ -649,7 +638,7 @@
bt3c_info_t *info;
/* Create new info device */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -665,17 +654,14 @@
static void bt3c_detach(struct pcmcia_device *link)
{
- bt3c_info_t *info = link->priv;
-
bt3c_release(link);
- kfree(info);
}
static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
- if (try == 0)
+ if (!try)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
@@ -761,7 +747,7 @@
}
-static struct pcmcia_device_id bt3c_ids[] = {
+static const struct pcmcia_device_id bt3c_ids[] = {
PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02),
PCMCIA_DEVICE_NULL
};
@@ -774,17 +760,4 @@
.remove = bt3c_detach,
.id_table = bt3c_ids,
};
-
-static int __init init_bt3c_cs(void)
-{
- return pcmcia_register_driver(&bt3c_driver);
-}
-
-
-static void __exit exit_bt3c_cs(void)
-{
- pcmcia_unregister_driver(&bt3c_driver);
-}
-
-module_init(init_bt3c_cs);
-module_exit(exit_bt3c_cs);
+module_pcmcia_driver(bt3c_driver);
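
bt3c_cs (like btuart_cs below) now allocates its per-device info with devm_kzalloc(), so the memory is released automatically when the PCMCIA device is unbound and the manual kfree() in the detach path disappears. A minimal sketch of the managed-allocation idea, with illustrative names only:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct demo_info {
		void *priv;
		/* ... */
	};

	static int demo_attach(struct device *dev)
	{
		struct demo_info *info;

		/* Freed by the driver core when "dev" is unbound, so the
		 * detach path needs no matching kfree(). */
		info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		dev_set_drvdata(dev, info);
		return 0;
	}
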
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305b..db2c3c3 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -29,28 +29,8 @@
struct btmrvl_debugfs_data {
struct dentry *config_dir;
struct dentry *status_dir;
-
- /* config */
- struct dentry *psmode;
- struct dentry *pscmd;
- struct dentry *hsmode;
- struct dentry *hscmd;
- struct dentry *gpiogap;
- struct dentry *hscfgcmd;
-
- /* status */
- struct dentry *curpsmode;
- struct dentry *hsstate;
- struct dentry *psstate;
- struct dentry *txdnldready;
};
-static int btmrvl_open_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t btmrvl_hscfgcmd_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
@@ -64,6 +44,8 @@
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hscfgcmd = result;
@@ -91,46 +73,7 @@
static const struct file_operations btmrvl_hscfgcmd_fops = {
.read = btmrvl_hscfgcmd_read,
.write = btmrvl_hscfgcmd_write,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- long result, ret;
-
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = strict_strtol(buf, 10, &result);
-
- priv->btmrvl_dev.psmode = result;
-
- return count;
-}
-
-static ssize_t btmrvl_psmode_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "%d\n",
- priv->btmrvl_dev.psmode);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_psmode_fops = {
- .read = btmrvl_psmode_read,
- .write = btmrvl_psmode_write,
- .open = btmrvl_open_generic,
+ .open = simple_open,
.llseek = default_llseek,
};
@@ -147,6 +90,8 @@
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.pscmd = result;
@@ -174,46 +119,7 @@
static const struct file_operations btmrvl_pscmd_fops = {
.read = btmrvl_pscmd_read,
.write = btmrvl_pscmd_write,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- long result, ret;
-
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = strict_strtol(buf, 16, &result);
-
- priv->btmrvl_dev.gpio_gap = result;
-
- return count;
-}
-
-static ssize_t btmrvl_gpiogap_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "0x%x\n",
- priv->btmrvl_dev.gpio_gap);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_gpiogap_fops = {
- .read = btmrvl_gpiogap_read,
- .write = btmrvl_gpiogap_write,
- .open = btmrvl_open_generic,
+ .open = simple_open,
.llseek = default_llseek,
};
@@ -230,6 +136,8 @@
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hscmd = result;
if (priv->btmrvl_dev.hscmd) {
@@ -255,124 +163,13 @@
static const struct file_operations btmrvl_hscmd_fops = {
.read = btmrvl_hscmd_read,
.write = btmrvl_hscmd_write,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- long result, ret;
-
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = strict_strtol(buf, 10, &result);
-
- priv->btmrvl_dev.hsmode = result;
-
- return count;
-}
-
-static ssize_t btmrvl_hsmode_read(struct file *file, char __user * userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hsmode);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_hsmode_fops = {
- .read = btmrvl_hsmode_read,
- .write = btmrvl_hsmode_write,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->psmode);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_curpsmode_fops = {
- .read = btmrvl_curpsmode_read,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->ps_state);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_psstate_fops = {
- .read = btmrvl_psstate_read,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->hs_state);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_hsstate_fops = {
- .read = btmrvl_hsstate_read,
- .open = btmrvl_open_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct btmrvl_private *priv = file->private_data;
- char buf[16];
- int ret;
-
- ret = snprintf(buf, sizeof(buf) - 1, "%d\n",
- priv->btmrvl_dev.tx_dnld_rdy);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
-}
-
-static const struct file_operations btmrvl_txdnldready_fops = {
- .read = btmrvl_txdnldready_read,
- .open = btmrvl_open_generic,
+ .open = simple_open,
.llseek = default_llseek,
};
void btmrvl_debugfs_init(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_debugfs_data *dbg;
if (!hdev->debugfs)
@@ -388,55 +185,40 @@
dbg->config_dir = debugfs_create_dir("config", hdev->debugfs);
- dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_psmode_fops);
- dbg->pscmd = debugfs_create_file("pscmd", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_pscmd_fops);
- dbg->gpiogap = debugfs_create_file("gpiogap", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_gpiogap_fops);
- dbg->hsmode = debugfs_create_file("hsmode", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_hsmode_fops);
- dbg->hscmd = debugfs_create_file("hscmd", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_hscmd_fops);
- dbg->hscfgcmd = debugfs_create_file("hscfgcmd", 0644, dbg->config_dir,
- hdev->driver_data, &btmrvl_hscfgcmd_fops);
+ debugfs_create_u8("psmode", 0644, dbg->config_dir,
+ &priv->btmrvl_dev.psmode);
+ debugfs_create_file("pscmd", 0644, dbg->config_dir,
+ priv, &btmrvl_pscmd_fops);
+ debugfs_create_x16("gpiogap", 0644, dbg->config_dir,
+ &priv->btmrvl_dev.gpio_gap);
+ debugfs_create_u8("hsmode", 0644, dbg->config_dir,
+ &priv->btmrvl_dev.hsmode);
+ debugfs_create_file("hscmd", 0644, dbg->config_dir,
+ priv, &btmrvl_hscmd_fops);
+ debugfs_create_file("hscfgcmd", 0644, dbg->config_dir,
+ priv, &btmrvl_hscfgcmd_fops);
dbg->status_dir = debugfs_create_dir("status", hdev->debugfs);
- dbg->curpsmode = debugfs_create_file("curpsmode", 0444,
- dbg->status_dir,
- hdev->driver_data,
- &btmrvl_curpsmode_fops);
- dbg->psstate = debugfs_create_file("psstate", 0444, dbg->status_dir,
- hdev->driver_data, &btmrvl_psstate_fops);
- dbg->hsstate = debugfs_create_file("hsstate", 0444, dbg->status_dir,
- hdev->driver_data, &btmrvl_hsstate_fops);
- dbg->txdnldready = debugfs_create_file("txdnldready", 0444,
- dbg->status_dir,
- hdev->driver_data,
- &btmrvl_txdnldready_fops);
+ debugfs_create_u8("curpsmode", 0444, dbg->status_dir,
+ &priv->adapter->psmode);
+ debugfs_create_u8("psstate", 0444, dbg->status_dir,
+ &priv->adapter->ps_state);
+ debugfs_create_u8("hsstate", 0444, dbg->status_dir,
+ &priv->adapter->hs_state);
+ debugfs_create_u8("txdnldready", 0444, dbg->status_dir,
+ &priv->btmrvl_dev.tx_dnld_rdy);
}
void btmrvl_debugfs_remove(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_debugfs_data *dbg = priv->debugfs_data;
if (!dbg)
return;
- debugfs_remove(dbg->psmode);
- debugfs_remove(dbg->pscmd);
- debugfs_remove(dbg->gpiogap);
- debugfs_remove(dbg->hsmode);
- debugfs_remove(dbg->hscmd);
- debugfs_remove(dbg->hscfgcmd);
- debugfs_remove(dbg->config_dir);
-
- debugfs_remove(dbg->curpsmode);
- debugfs_remove(dbg->psstate);
- debugfs_remove(dbg->hsstate);
- debugfs_remove(dbg->txdnldready);
- debugfs_remove(dbg->status_dir);
+ debugfs_remove_recursive(dbg->config_dir);
+ debugfs_remove_recursive(dbg->status_dir);
kfree(dbg);
}
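
The debugfs rework above drops the hand-written read/write fops for plain integer attributes and lets debugfs_create_u8()/debugfs_create_x16() expose the variables directly, which is also why debugfs_remove_recursive() on the two directories is now enough for cleanup. A minimal sketch of the same idea, with illustrative names:

	#include <linux/debugfs.h>

	struct demo_state {
		u8 psmode;	/* writable from user space */
		u8 ps_state;	/* read-only status */
		u16 gpio_gap;	/* shown in hex */
	};

	static void demo_debugfs_init(struct dentry *dir, struct demo_state *st)
	{
		/* debugfs handles the text <-> integer conversion, so no
		 * custom file_operations are required for these values. */
		debugfs_create_u8("psmode", 0644, dir, &st->psmode);
		debugfs_create_u8("psstate", 0444, dir, &st->ps_state);
		debugfs_create_x16("gpiogap", 0644, dir, &st->gpio_gap);
	}
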
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 90bda50..27068d1 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -67,6 +67,7 @@
u8 wakeup_tries;
wait_queue_head_t cmd_wait_q;
u8 cmd_complete;
+ bool is_suspended;
};
struct btmrvl_private {
@@ -135,12 +136,14 @@
void btmrvl_interrupt(struct btmrvl_private *priv);
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
+int btmrvl_enable_hs(struct btmrvl_private *priv);
#ifdef CONFIG_DEBUG_FS
void btmrvl_debugfs_init(struct hci_dev *hdev);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 548d1d9..9a9f518 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -18,6 +18,8 @@
* this warranty disclaimer.
**/
+#include <linux/module.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -42,23 +44,35 @@
}
EXPORT_SYMBOL_GPL(btmrvl_interrupt);
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
- struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf;
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
+ struct hci_ev_cmd_complete *ec;
+ u16 opcode, ocf, ogf;
+
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
ocf = hci_opcode_ocf(opcode);
+ ogf = hci_opcode_ogf(opcode);
+
if (ocf == BT_CMD_MODULE_CFG_REQ &&
priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
}
+
+ if (ogf == OGF) {
+ BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
+ ogf, ocf);
+ kfree_skb(skb);
+ return false;
+ }
}
+
+ return true;
}
EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
@@ -198,6 +212,36 @@
}
EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
+int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
+{
+ struct sk_buff *skb;
+ struct btmrvl_cmd *cmd;
+
+ skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("No free skb");
+ return -ENOMEM;
+ }
+
+ cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
+ cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
+ BT_CMD_HOST_SLEEP_CONFIG));
+ cmd->length = 2;
+ cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+ cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+
+ bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+
+ skb->dev = (void *) priv->btmrvl_dev.hcidev;
+ skb_queue_head(&priv->adapter->tx_queue, skb);
+
+ BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
+ cmd->data[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
+
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
struct sk_buff *skb;
@@ -230,7 +274,7 @@
}
EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
-static int btmrvl_enable_hs(struct btmrvl_private *priv)
+int btmrvl_enable_hs(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
@@ -266,35 +310,15 @@
return ret;
}
+EXPORT_SYMBOL_GPL(btmrvl_enable_hs);
int btmrvl_prepare_command(struct btmrvl_private *priv)
{
- struct sk_buff *skb = NULL;
- struct btmrvl_cmd *cmd;
int ret = 0;
if (priv->btmrvl_dev.hscfgcmd) {
priv->btmrvl_dev.hscfgcmd = 0;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG));
- cmd->length = 2;
- cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
- cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
-
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
-
- BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x",
- cmd->data[0], cmd->data[1]);
+ btmrvl_send_hscfg_cmd(priv);
}
if (priv->btmrvl_dev.pscmd) {
@@ -385,10 +409,6 @@
return -ENOIOCTLCMD;
}
-static void btmrvl_destruct(struct hci_dev *hdev)
-{
-}
-
static int btmrvl_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
@@ -396,12 +416,13 @@
BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
- if (!hdev || !hdev->driver_data) {
+ if (!hdev) {
BT_ERR("Frame for unknown HCI device");
return -ENODEV;
}
- priv = (struct btmrvl_private *) hdev->driver_data;
+ priv = hci_get_drvdata(hdev);
+
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
@@ -432,7 +453,7 @@
static int btmrvl_flush(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
skb_queue_purge(&priv->adapter->tx_queue);
@@ -441,7 +462,7 @@
static int btmrvl_close(struct hci_dev *hdev)
{
- struct btmrvl_private *priv = hdev->driver_data;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
@@ -473,12 +494,14 @@
init_waitqueue_entry(&wait, current);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
add_wait_queue(&thread->wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ BT_DBG("main_thread: break from main thread");
+ break;
+ }
if (adapter->wakeup_tries ||
((!adapter->int_count) &&
@@ -494,11 +517,6 @@
BT_DBG("main_thread woke up");
- if (kthread_should_stop()) {
- BT_DBG("main_thread: break from main thread");
- break;
- }
-
spin_lock_irqsave(&priv->driver_lock, flags);
if (adapter->int_count) {
adapter->int_count = 0;
@@ -546,16 +564,14 @@
}
priv->btmrvl_dev.hcidev = hdev;
- hdev->driver_data = priv;
+ hci_set_drvdata(hdev, priv);
hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
hdev->flush = btmrvl_flush;
hdev->send = btmrvl_send_frame;
- hdev->destruct = btmrvl_destruct;
hdev->ioctl = btmrvl_ioctl;
- hdev->owner = THIS_MODULE;
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
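
btmrvl_send_hscfg_cmd() encodes the 16-bit gpio_gap so that data[0] carries the wake-up GPIO (high byte) and data[1] the gap (low byte), matching the BT_DBG output above and the 0xffff default programmed after firmware download in the SDIO probe path below. A tiny standalone illustration of that split, not part of the patch:

	#include <linux/types.h>

	static void demo_pack_gpio_gap(u16 gpio_gap, u8 data[2])
	{
		data[0] = (gpio_gap & 0xff00) >> 8;	/* wake-up GPIO number */
		data[1] = gpio_gap & 0x00ff;		/* gap value */
	}

	/* demo_pack_gpio_gap(0xffff, data) yields gpio=0xff, gap=0xff, i.e.
	 * the default configured in btmrvl_sdio_probe(). */
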
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 7f521d4..13693b7 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -23,6 +23,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -64,7 +65,7 @@
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -81,9 +82,26 @@
.io_port_2 = 0x7a,
};
-static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = {
- .helper = "sd8688_helper.bin",
- .firmware = "sd8688.bin",
+static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
+ .cfg = 0x00,
+ .host_int_mask = 0x02,
+ .host_intstatus = 0x03,
+ .card_status = 0x50,
+ .sq_read_base_addr_a0 = 0x60,
+ .sq_read_base_addr_a1 = 0x61,
+ .card_revision = 0xbc,
+ .card_fw_status0 = 0xc0,
+ .card_fw_status1 = 0xc1,
+ .card_rx_len = 0xc2,
+ .card_rx_unit = 0xc3,
+ .io_port_0 = 0xd8,
+ .io_port_1 = 0xd9,
+ .io_port_2 = 0xda,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
+ .helper = "mrvl/sd8688_helper.bin",
+ .firmware = "mrvl/sd8688.bin",
.reg = &btmrvl_reg_8688,
.sd_blksz_fw_dl = 64,
};
@@ -91,17 +109,40 @@
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8897_uapsta.bin",
+ .reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
- .driver_data = (unsigned long) &btmrvl_sdio_sd6888 },
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8688 },
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8787 Bluetooth AMP device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
+ /* Marvell SD8897 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
{ } /* Terminating entry */
};
@@ -214,24 +255,24 @@
static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card,
int pollnum)
{
- int ret = -ETIMEDOUT;
u16 firmwarestat;
- unsigned int tries;
+ int tries, ret;
/* Wait for firmware to become ready */
for (tries = 0; tries < pollnum; tries++) {
- if (btmrvl_sdio_read_fw_status(card, &firmwarestat) < 0)
+ sdio_claim_host(card->func);
+ ret = btmrvl_sdio_read_fw_status(card, &firmwarestat);
+ sdio_release_host(card->func);
+ if (ret < 0)
continue;
- if (firmwarestat == FIRMWARE_READY) {
- ret = 0;
- break;
- } else {
- msleep(10);
- }
+ if (firmwarestat == FIRMWARE_READY)
+ return 0;
+
+ msleep(10);
}
- return ret;
+ return -ETIMEDOUT;
}
static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card)
@@ -328,9 +369,7 @@
done:
kfree(tmphlprbuf);
- if (fw_helper)
- release_firmware(fw_helper);
-
+ release_firmware(fw_helper);
return ret;
}
@@ -473,17 +512,14 @@
done:
kfree(tmpfwbuf);
-
- if (fw_firmware)
- release_firmware(fw_firmware);
-
+ release_firmware(fw_firmware);
return ret;
}
static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
{
u16 buf_len = 0;
- int ret, buf_block_len, blksz;
+ int ret, num_blocks, blksz;
struct sk_buff *skb = NULL;
u32 type;
u8 *payload = NULL;
@@ -505,18 +541,17 @@
}
blksz = SDIO_BLOCK_SIZE;
- buf_block_len = (buf_len + blksz - 1) / blksz;
+ num_blocks = DIV_ROUND_UP(buf_len, blksz);
if (buf_len <= SDIO_HEADER_LEN
- || (buf_block_len * blksz) > ALLOC_BUF_SIZE) {
+ || (num_blocks * blksz) > ALLOC_BUF_SIZE) {
BT_ERR("invalid packet length: %d", buf_len);
ret = -EINVAL;
goto exit;
}
/* Allocate buffer */
- skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN,
- GFP_ATOMIC);
+ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
goto exit;
@@ -532,7 +567,7 @@
payload = skb->data;
ret = sdio_readsb(card->func, payload, card->ioport,
- buf_block_len * blksz);
+ num_blocks * blksz);
if (ret < 0) {
BT_ERR("readsb failed: %d", ret);
ret = -EIO;
@@ -544,7 +579,16 @@
*/
buf_len = payload[0];
- buf_len |= (u16) payload[1] << 8;
+ buf_len |= payload[1] << 8;
+ buf_len |= payload[2] << 16;
+
+ if (buf_len > blksz * num_blocks) {
+ BT_ERR("Skip incorrect packet: hdrlen %d buffer %d",
+ buf_len, blksz * num_blocks);
+ ret = -EIO;
+ goto exit;
+ }
+
type = payload[3];
switch (type) {
@@ -556,10 +600,13 @@
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
- if (type == HCI_EVENT_PKT)
- btmrvl_check_evtpkt(priv, skb);
+ if (type == HCI_EVENT_PKT) {
+ if (btmrvl_check_evtpkt(priv, skb))
+ hci_recv_frame(skb);
+ } else {
+ hci_recv_frame(skb);
+ }
- hci_recv_frame(skb);
hdev->stat.byte_rx += buf_len;
break;
@@ -577,8 +624,7 @@
default:
BT_ERR("Unknown packet type:%d", type);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload,
- blksz * buf_block_len);
+ BT_ERR("hex: %*ph", blksz * num_blocks, payload);
kfree_skb(skb);
skb = NULL;
@@ -588,8 +634,7 @@
exit:
if (ret) {
hdev->stat.err_rx++;
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
return ret;
@@ -838,8 +883,7 @@
if (ret < 0) {
i++;
BT_ERR("i=%d writesb failed: %d", i, ret);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- payload, nb);
+ BT_ERR("hex: %*ph", nb, payload);
ret = -EIO;
if (i > MAX_WRITE_IOMEM_RETRY)
goto exit;
@@ -857,7 +901,7 @@
static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
{
- int ret = 0;
+ int ret;
u8 fws0;
int pollnum = MAX_POLL_TRIES;
@@ -865,13 +909,14 @@
BT_ERR("card or function is NULL!");
return -EINVAL;
}
- sdio_claim_host(card->func);
if (!btmrvl_sdio_verify_fw_download(card, 1)) {
BT_DBG("Firmware already downloaded!");
- goto done;
+ return 0;
}
+ sdio_claim_host(card->func);
+
/* Check if other function driver is downloading the firmware */
fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret);
if (ret) {
@@ -901,15 +946,21 @@
}
}
+ sdio_release_host(card->func);
+
+	/*
+	 * Winner or not, with this test the FW synchronizes when the
+	 * module can continue its initialization.
+	 */
if (btmrvl_sdio_verify_fw_download(card, pollnum)) {
BT_ERR("FW failed to be active in time!");
- ret = -ETIMEDOUT;
- goto done;
+ return -ETIMEDOUT;
}
+ return 0;
+
done:
sdio_release_host(card->func);
-
return ret;
}
@@ -944,11 +995,9 @@
BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
id->vendor, id->device, id->class, func->num);
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card) {
- ret = -ENOMEM;
- goto done;
- }
+ card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
card->func = func;
@@ -962,8 +1011,7 @@
if (btmrvl_sdio_register_dev(card) < 0) {
BT_ERR("Failed to register BT device!");
- ret = -ENODEV;
- goto free_card;
+ return -ENODEV;
}
/* Disable the interrupts on the card */
@@ -975,8 +1023,6 @@
goto unreg_dev;
}
- msleep(100);
-
btmrvl_sdio_enable_host_int(card);
priv = btmrvl_add_card(card);
@@ -1002,15 +1048,15 @@
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
+ priv->btmrvl_dev.gpio_gap = 0xffff;
+ btmrvl_send_hscfg_cmd(priv);
+
return 0;
disable_host_int:
btmrvl_sdio_disable_host_int(card);
unreg_dev:
btmrvl_sdio_unregister_dev(card);
-free_card:
- kfree(card);
-done:
return ret;
}
@@ -1032,16 +1078,115 @@
BT_DBG("unregester dev");
btmrvl_sdio_unregister_dev(card);
btmrvl_remove_card(card->priv);
- kfree(card);
}
}
}
+static int btmrvl_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmrvl_sdio_card *card;
+ struct btmrvl_private *priv;
+ mmc_pm_flag_t pm_flags;
+ struct hci_dev *hcidev;
+
+ if (func) {
+ pm_flags = sdio_get_host_pm_caps(func);
+ BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func),
+ pm_flags);
+ if (!(pm_flags & MMC_PM_KEEP_POWER)) {
+ BT_ERR("%s: cannot remain alive while suspended",
+ sdio_func_id(func));
+ return -ENOSYS;
+ }
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("card or priv structure is not valid");
+ return 0;
+ }
+ } else {
+ BT_ERR("sdio_func is not specified");
+ return 0;
+ }
+
+ priv = card->priv;
+
+ if (priv->adapter->hs_state != HS_ACTIVATED) {
+ if (btmrvl_enable_hs(priv)) {
+			BT_ERR("HS not activated, suspend failed!");
+ return -EBUSY;
+ }
+ }
+ hcidev = priv->btmrvl_dev.hcidev;
+ BT_DBG("%s: SDIO suspend", hcidev->name);
+ hci_suspend_dev(hcidev);
+ skb_queue_purge(&priv->adapter->tx_queue);
+
+ priv->adapter->is_suspended = true;
+
+	/* Keep the card powered if host sleep was enabled successfully */
+ if (priv->adapter->hs_state == HS_ACTIVATED) {
+ BT_DBG("suspend with MMC_PM_KEEP_POWER");
+ return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ } else {
+ BT_DBG("suspend without MMC_PM_KEEP_POWER");
+ return 0;
+ }
+}
+
+static int btmrvl_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmrvl_sdio_card *card;
+ struct btmrvl_private *priv;
+ mmc_pm_flag_t pm_flags;
+ struct hci_dev *hcidev;
+
+ if (func) {
+ pm_flags = sdio_get_host_pm_caps(func);
+ BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func),
+ pm_flags);
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("card or priv structure is not valid");
+ return 0;
+ }
+ } else {
+ BT_ERR("sdio_func is not specified");
+ return 0;
+ }
+ priv = card->priv;
+
+ if (!priv->adapter->is_suspended) {
+ BT_DBG("device already resumed");
+ return 0;
+ }
+
+ priv->adapter->is_suspended = false;
+ hcidev = priv->btmrvl_dev.hcidev;
+ BT_DBG("%s: SDIO resume", hcidev->name);
+ hci_resume_dev(hcidev);
+ priv->hw_wakeup_firmware(priv);
+ priv->adapter->hs_state = HS_DEACTIVATED;
+ BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name);
+
+ return 0;
+}
+
+static const struct dev_pm_ops btmrvl_sdio_pm_ops = {
+ .suspend = btmrvl_sdio_suspend,
+ .resume = btmrvl_sdio_resume,
+};
+
static struct sdio_driver bt_mrvl_sdio = {
.name = "btmrvl_sdio",
.id_table = btmrvl_sdio_ids,
.probe = btmrvl_sdio_probe,
.remove = btmrvl_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = &btmrvl_sdio_pm_ops,
+ }
};
static int __init btmrvl_sdio_init_module(void)
@@ -1072,6 +1217,8 @@
MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("sd8688_helper.bin");
-MODULE_FIRMWARE("sd8688.bin");
+MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
+MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
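
btmrvl_sdio_card_to_host() now rounds the transfer up to whole SDIO blocks with DIV_ROUND_UP() and reassembles the packet length from the first three little-endian header bytes before sanity-checking it against the buffer that was actually read. A minimal sketch of those two steps; the helper name and the block-size value are assumptions for illustration only:

	#include <linux/kernel.h>	/* DIV_ROUND_UP */
	#include <linux/types.h>

	#define DEMO_SDIO_BLOCK_SIZE	64	/* assumed block size */

	/* Rebuild the 24-bit length from the SDIO header and reject packets
	 * that claim to be larger than the rounded-up buffer we read. */
	static int demo_parse_rx_len(const u8 *payload, u16 buf_len,
				     u32 *out_len)
	{
		int num_blocks = DIV_ROUND_UP(buf_len, DEMO_SDIO_BLOCK_SIZE);
		u32 len = payload[0] | (payload[1] << 8) | (payload[2] << 16);

		if (len > (u32)num_blocks * DEMO_SDIO_BLOCK_SIZE)
			return -EIO;	/* corrupted header, skip packet */

		*out_len = len;
		return 0;
	}
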
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 792e32d..4a99097 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -189,7 +189,7 @@
static int btsdio_open(struct hci_dev *hdev)
{
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -225,7 +225,7 @@
static int btsdio_close(struct hci_dev *hdev)
{
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -246,7 +246,7 @@
static int btsdio_flush(struct hci_dev *hdev)
{
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -258,7 +258,7 @@
static int btsdio_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btsdio_data *data = hdev->driver_data;
+ struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -289,15 +289,6 @@
return 0;
}
-static void btsdio_destruct(struct hci_dev *hdev)
-{
- struct btsdio_data *data = hdev->driver_data;
-
- BT_DBG("%s", hdev->name);
-
- kfree(data);
-}
-
static int btsdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -313,7 +304,7 @@
tuple = tuple->next;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -324,13 +315,11 @@
skb_queue_head_init(&data->txq);
hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
+ if (!hdev)
return -ENOMEM;
- }
hdev->bus = HCI_SDIO;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
if (id->class == SDIO_CLASS_BT_AMP)
hdev->dev_type = HCI_AMP;
@@ -345,14 +334,10 @@
hdev->close = btsdio_close;
hdev->flush = btsdio_flush;
hdev->send = btsdio_send_frame;
- hdev->destruct = btsdio_destruct;
-
- hdev->owner = THIS_MODULE;
err = hci_register_dev(hdev);
if (err < 0) {
hci_free_dev(hdev);
- kfree(data);
return err;
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index f8a0708..beb262f 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -38,7 +38,6 @@
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <pcmcia/cistpl.h>
@@ -141,9 +140,9 @@
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -397,7 +396,7 @@
static int btuart_hci_flush(struct hci_dev *hdev)
{
- btuart_info_t *info = (btuart_info_t *)(hdev->driver_data);
+ btuart_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -435,7 +434,7 @@
return -ENODEV;
}
- info = (btuart_info_t *)(hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -447,7 +446,7 @@
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
- };
+ }
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -459,11 +458,6 @@
}
-static void btuart_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -498,18 +492,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = btuart_hci_open;
hdev->close = btuart_hci_close;
hdev->flush = btuart_hci_flush;
hdev->send = btuart_hci_send_frame;
- hdev->destruct = btuart_hci_destruct;
hdev->ioctl = btuart_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
@@ -565,9 +556,7 @@
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
@@ -578,7 +567,7 @@
btuart_info_t *info;
/* Create new info device */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -594,17 +583,14 @@
static void btuart_detach(struct pcmcia_device *link)
{
- btuart_info_t *info = link->priv;
-
btuart_release(link);
- kfree(info);
}
static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
- if (try == 0)
+ if (!try)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
@@ -689,7 +675,7 @@
pcmcia_disable_device(link);
}
-static struct pcmcia_device_id btuart_ids[] = {
+static const struct pcmcia_device_id btuart_ids[] = {
/* don't use this driver. Use serial_cs + hci_uart instead */
PCMCIA_DEVICE_NULL
};
@@ -702,17 +688,4 @@
.remove = btuart_detach,
.id_table = btuart_ids,
};
-
-static int __init init_btuart_cs(void)
-{
- return pcmcia_register_driver(&btuart_driver);
-}
-
-
-static void __exit exit_btuart_cs(void)
-{
- pcmcia_unregister_driver(&btuart_driver);
-}
-
-module_init(init_btuart_cs);
-module_exit(exit_btuart_cs);
+module_pcmcia_driver(btuart_driver);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f64f7f3..58491f1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,29 +21,22 @@
*
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-
#include <linux/usb.h>
+#include <linux/firmware.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define VERSION "0.6"
-static int ignore_dga;
-static int ignore_csr;
-static int ignore_sniffer;
-static int disable_scofix;
-static int force_scofix;
+static bool ignore_dga;
+static bool ignore_csr;
+static bool ignore_sniffer;
+static bool disable_scofix;
+static bool force_scofix;
-static int reset = 1;
+static bool reset = 1;
static struct usb_driver btusb_driver;
@@ -55,6 +48,7 @@
#define BTUSB_BROKEN_ISOC 0x20
#define BTUSB_WRONG_SCO_MTU 0x40
#define BTUSB_ATH3012 0x80
+#define BTUSB_INTEL 0x100
static struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -67,7 +61,7 @@
{ USB_DEVICE(0x0e8d, 0x763f) },
/* Broadcom SoftSailing reporting vendor specific */
- { USB_DEVICE(0x05ac, 0x21e1) },
+ { USB_DEVICE(0x0a5c, 0x21e1) },
/* Apple MacBookPro 7,1 */
{ USB_DEVICE(0x05ac, 0x8213) },
@@ -224,6 +218,9 @@
/* Frontline ComProbe Bluetooth Sniffer */
{ USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER },
+ /* Intel Bluetooth device */
+ { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+
{ } /* Terminating entry */
};
@@ -286,7 +283,7 @@
static void btusb_intr_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -304,9 +301,6 @@
BT_ERR("%s corrupted event packet", hdev->name);
hdev->stat.err_rx++;
}
- } else if (urb->status == -ENOENT) {
- /* Avoid suspend failed when usb_kill_urb */
- return;
}
if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
@@ -317,7 +311,9 @@
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- if (err != -EPERM)
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
@@ -326,7 +322,7 @@
static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -361,7 +357,8 @@
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -374,7 +371,7 @@
static void btusb_bulk_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -392,9 +389,6 @@
BT_ERR("%s corrupted ACL packet", hdev->name);
hdev->stat.err_rx++;
}
- } else if (urb->status == -ENOENT) {
- /* Avoid suspend failed when usb_kill_urb */
- return;
}
if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
@@ -405,7 +399,9 @@
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- if (err != -EPERM)
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
@@ -414,7 +410,7 @@
static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -447,7 +443,8 @@
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -460,7 +457,7 @@
static void btusb_isoc_complete(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int i, err;
BT_DBG("%s urb %p status %d count %d", hdev->name,
@@ -486,9 +483,6 @@
hdev->stat.err_rx++;
}
}
- } else if (urb->status == -ENOENT) {
- /* Avoid suspend failed when usb_kill_urb */
- return;
}
if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
@@ -498,14 +492,16 @@
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- if (err != -EPERM)
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
}
-static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
int i, offset = 0;
@@ -528,7 +524,7 @@
static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
@@ -554,15 +550,10 @@
pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
- urb->dev = data->udev;
- urb->pipe = pipe;
- urb->context = hdev;
- urb->complete = btusb_isoc_complete;
- urb->interval = data->isoc_rx_ep->bInterval;
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
+ hdev, data->isoc_rx_ep->bInterval);
urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
- urb->transfer_buffer = buf;
- urb->transfer_buffer_length = size;
__fill_isoc_descriptor(urb, size,
le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
@@ -571,7 +562,8 @@
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -585,7 +577,7 @@
{
struct sk_buff *skb = urb->context;
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
BT_DBG("%s urb %p status %d count %d", hdev->name,
urb, urb->status, urb->actual_length);
@@ -632,7 +624,7 @@
static int btusb_open(struct hci_dev *hdev)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -682,7 +674,7 @@
static int btusb_close(struct hci_dev *hdev)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
int err;
BT_DBG("%s", hdev->name);
@@ -712,7 +704,7 @@
static int btusb_flush(struct hci_dev *hdev)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
@@ -724,7 +716,7 @@
static int btusb_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
unsigned int pipe;
@@ -762,8 +754,7 @@
break;
case HCI_ACLDATA_PKT:
- if (!data->bulk_tx_ep || (hdev->conn_hash.acl_num < 1 &&
- hdev->conn_hash.le_num < 1))
+ if (!data->bulk_tx_ep)
return -ENODEV;
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -819,31 +810,23 @@
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p submission failed", hdev->name, urb);
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
} else {
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
-static void btusb_destruct(struct hci_dev *hdev)
-{
- struct btusb_data *data = hdev->driver_data;
-
- BT_DBG("%s", hdev->name);
-
- kfree(data);
-}
-
static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
BT_DBG("%s evt %d", hdev->name, evt);
@@ -853,9 +836,9 @@
}
}
-static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
{
- struct btusb_data *data = hdev->driver_data;
+ struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_interface *intf = data->isoc;
struct usb_endpoint_descriptor *ep_desc;
int i, err;
@@ -900,6 +883,7 @@
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
+ int new_alts;
int err;
if (hdev->conn_hash.sco_num > 0) {
@@ -913,11 +897,19 @@
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
- if (data->isoc_altsetting != 2) {
+
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
+ new_alts = alts[hdev->conn_hash.sco_num - 1];
+ } else {
+ new_alts = hdev->conn_hash.sco_num;
+ }
+
+ if (data->isoc_altsetting != new_alts) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
- if (__set_isoc_interface(hdev, 2) < 0)
+ if (__set_isoc_interface(hdev, new_alts) < 0)
return;
}
@@ -949,6 +941,391 @@
usb_autopm_put_interface(data->intf);
}
+static int btusb_setup_bcm92035(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 val = 0x00;
+
+ BT_DBG("%s", hdev->name);
+
+ skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ BT_ERR("BCM92035 command failed (%ld)", -PTR_ERR(skb));
+ else
+ kfree_skb(skb);
+
+ return 0;
+}
+
+struct intel_version {
+ u8 status;
+ u8 hw_platform;
+ u8 hw_variant;
+ u8 hw_revision;
+ u8 fw_variant;
+ u8 fw_revision;
+ u8 fw_build_num;
+ u8 fw_build_ww;
+ u8 fw_build_yy;
+ u8 fw_patch_num;
+} __packed;
+
+static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
+ struct intel_version *ver)
+{
+ const struct firmware *fw;
+ char fwname[64];
+ int ret;
+
+ snprintf(fwname, sizeof(fwname),
+ "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.bseq",
+ ver->hw_platform, ver->hw_variant, ver->hw_revision,
+ ver->fw_variant, ver->fw_revision, ver->fw_build_num,
+ ver->fw_build_ww, ver->fw_build_yy);
+
+ ret = request_firmware(&fw, fwname, &hdev->dev);
+ if (ret < 0) {
+ if (ret == -EINVAL) {
+ BT_ERR("%s Intel firmware file request failed (%d)",
+ hdev->name, ret);
+ return NULL;
+ }
+
+ BT_ERR("%s failed to open Intel firmware file: %s(%d)",
+ hdev->name, fwname, ret);
+
+ /* If the correct firmware patch file is not found, use the
+ * default firmware patch file instead
+ */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq",
+ ver->hw_platform, ver->hw_variant);
+ if (request_firmware(&fw, fwname, &hdev->dev) < 0) {
+ BT_ERR("%s failed to open default Intel fw file: %s",
+ hdev->name, fwname);
+ return NULL;
+ }
+ }
+
+ BT_INFO("%s: Intel Bluetooth firmware file: %s", hdev->name, fwname);
+
+ return fw;
+}
+
+static int btusb_setup_intel_patching(struct hci_dev *hdev,
+ const struct firmware *fw,
+ const u8 **fw_ptr, int *disable_patch)
+{
+ struct sk_buff *skb;
+ struct hci_command_hdr *cmd;
+ const u8 *cmd_param;
+ struct hci_event_hdr *evt = NULL;
+ const u8 *evt_param = NULL;
+ int remain = fw->size - (*fw_ptr - fw->data);
+
+	/* The first byte indicates the type of the patch command or event.
+	 * 0x01 means HCI command and 0x02 is HCI event. If the first byte
+	 * in the current firmware buffer is not 0x01, or the remaining
+	 * buffer is smaller than an HCI command header, the firmware file
+	 * is corrupted and the patching process should stop.
+	 */
+ if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) {
+ BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name);
+ return -EINVAL;
+ }
+ (*fw_ptr)++;
+ remain--;
+
+ cmd = (struct hci_command_hdr *)(*fw_ptr);
+ *fw_ptr += sizeof(*cmd);
+ remain -= sizeof(*cmd);
+
+	/* Ensure that the remaining firmware data is at least as long as the
+	 * command parameter length. If not, the firmware file is corrupted.
+	 */
+ if (remain < cmd->plen) {
+ BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name);
+ return -EFAULT;
+ }
+
+	/* If there is a command that loads a patch in the firmware
+	 * file, then enable the patch upon success; otherwise just
+	 * disable the manufacturer mode. For example, patch activation
+	 * is not required when the default firmware patch file is used,
+	 * because there is no patch data to load.
+	 */
+ if (*disable_patch && le16_to_cpu(cmd->opcode) == 0xfc8e)
+ *disable_patch = 0;
+
+ cmd_param = *fw_ptr;
+ *fw_ptr += cmd->plen;
+ remain -= cmd->plen;
+
+	/* This reads the expected events when the above command is sent to the
+	 * device. Some vendor commands expect more than one event, for
+	 * example a command status event followed by a vendor specific event.
+	 * In this case, only the last expected event is kept, so the command
+	 * can be sent with __hci_cmd_sync_ev(), which returns the sk_buff of
+	 * the last expected event.
+	 */
+ while (remain > HCI_EVENT_HDR_SIZE && *fw_ptr[0] == 0x02) {
+ (*fw_ptr)++;
+ remain--;
+
+ evt = (struct hci_event_hdr *)(*fw_ptr);
+ *fw_ptr += sizeof(*evt);
+ remain -= sizeof(*evt);
+
+ if (remain < evt->plen) {
+ BT_ERR("%s Intel fw corrupted: invalid evt len",
+ hdev->name);
+ return -EFAULT;
+ }
+
+ evt_param = *fw_ptr;
+ *fw_ptr += evt->plen;
+ remain -= evt->plen;
+ }
+
+	/* Every HCI command in the firmware file has its corresponding event.
+	 * If the event is not found or remain is smaller than zero, the
+	 * firmware file is corrupted.
+	 */
+ if (!evt || !evt_param || remain < 0) {
+ BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name);
+ return -EFAULT;
+ }
+
+ skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
+ cmd_param, evt->evt, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
+ hdev->name, cmd->opcode, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+	/* Ensure that the returned event matches the event data read from
+	 * the firmware file. First check the length, then the contents of
+	 * the event.
+	 */
+ if (skb->len != evt->plen) {
+ BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name,
+ le16_to_cpu(cmd->opcode));
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ if (memcmp(skb->data, evt_param, evt->plen)) {
+ BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)",
+ hdev->name, le16_to_cpu(cmd->opcode));
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int btusb_setup_intel(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ int disable_patch;
+ struct intel_version *ver;
+
+ const u8 mfg_enable[] = { 0x01, 0x00 };
+ const u8 mfg_disable[] = { 0x00, 0x00 };
+ const u8 mfg_reset_deactivate[] = { 0x00, 0x01 };
+ const u8 mfg_reset_activate[] = { 0x00, 0x02 };
+
+ BT_DBG("%s", hdev->name);
+
+ /* The controller has a bug with the first HCI command sent to it
+ * returning number of completed commands as zero. This would stall the
+ * command processing in the Bluetooth core.
+ *
+ * As a workaround, send HCI Reset command first which will reset the
+ * number of completed commands and allow normal command processing
+ * from now on.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s sending initial HCI reset command failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ /* Read Intel specific controller version first to allow selection of
+ * which firmware file to load.
+ *
+ * The returned information is the hardware variant and revision plus
+ * firmware variant, revision and build number.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s reading Intel fw version command failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s Intel version event length mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ ver = (struct intel_version *)skb->data;
+ if (ver->status) {
+ BT_ERR("%s Intel fw version event failed (%02x)", hdev->name,
+ ver->status);
+ kfree_skb(skb);
+ return -bt_to_errno(ver->status);
+ }
+
+ BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
+ hdev->name, ver->hw_platform, ver->hw_variant,
+ ver->hw_revision, ver->fw_variant, ver->fw_revision,
+ ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy,
+ ver->fw_patch_num);
+
+ /* fw_patch_num indicates the version of the patch the device currently
+ * has. If there is no patch data in the device, it is always 0x00,
+ * so if it is anything other than 0x00 there is no need to patch the
+ * device again.
+ */
+ if (ver->fw_patch_num) {
+ BT_INFO("%s: Intel device is already patched. patch num: %02x",
+ hdev->name, ver->fw_patch_num);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ /* Opens the firmware patch file based on the firmware version read
+ * from the controller. If it fails to open the matching firmware
+ * patch file, it tries to open the default firmware patch file.
+ * If no patch file is found, allow the device to operate without
+ * a patch.
+ */
+ fw = btusb_setup_intel_get_fw(hdev, ver);
+ if (!fw) {
+ kfree_skb(skb);
+ return 0;
+ }
+ fw_ptr = fw->data;
+
+ /* This Intel specific command enables the manufacturer mode of the
+ * controller.
+ *
+ * Only while this mode is enabled can the driver download the
+ * firmware patch data and configuration parameters.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc11, 2, mfg_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ release_firmware(fw);
+ return PTR_ERR(skb);
+ }
+
+ if (skb->data[0]) {
+ u8 evt_status = skb->data[0];
+ BT_ERR("%s enable Intel manufacturer mode event failed (%02x)",
+ hdev->name, evt_status);
+ kfree_skb(skb);
+ release_firmware(fw);
+ return -bt_to_errno(evt_status);
+ }
+ kfree_skb(skb);
+
+ disable_patch = 1;
+
+ /* The firmware data file consists of a list of Intel specific HCI
+ * commands and their expected events. The first byte indicates the
+ * type of the message, either HCI command or HCI event.
+ *
+ * It reads the command and its expected event from the firmware file,
+ * and sends them to the controller. Once __hci_cmd_sync_ev() returns,
+ * the returned event is compared with the event read from the firmware
+ * file and this continues until all the messages are downloaded to
+ * the controller.
+ *
+ * Once the firmware patching is completed successfully,
+ * the manufacturer mode is disabled with reset and activating the
+ * downloaded patch.
+ *
+ * If the firmware patching fails, the manufacturer mode is
+ * disabled with reset and deactivating the patch.
+ *
+ * If the default patch file is used, no reset is done when disabling
+ * the manufacturer mode.
+ */
+ while (fw->size > fw_ptr - fw->data) {
+ int ret;
+
+ ret = btusb_setup_intel_patching(hdev, fw, &fw_ptr,
+ &disable_patch);
+ if (ret < 0)
+ goto exit_mfg_deactivate;
+ }
+
+ release_firmware(fw);
+
+ if (disable_patch)
+ goto exit_mfg_disable;
+
+ /* Patching completed successfully, so disable the manufacturer mode
+ * with a reset and activate the downloaded firmware patches.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_activate),
+ mfg_reset_activate, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
+ hdev->name);
+
+ return 0;
+
+exit_mfg_disable:
+ /* Disable the manufacturer mode without reset */
+ skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_disable), mfg_disable,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
+ return 0;
+
+exit_mfg_deactivate:
+ release_firmware(fw);
+
+ /* Patching failed. Disable the manufacturer mode with reset and
+ * deactivate the downloaded firmware patches.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_deactivate),
+ mfg_reset_deactivate, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
+ hdev->name);
+
+ return 0;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -991,7 +1368,7 @@
return -ENODEV;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1014,10 +1391,8 @@
}
}
- if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
- kfree(data);
+ if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
return -ENODEV;
- }
data->cmdreq_type = USB_TYPE_CLASS;
@@ -1037,32 +1412,33 @@
init_usb_anchor(&data->deferred);
hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
+ if (!hdev)
return -ENOMEM;
- }
hdev->bus = HCI_USB;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
- hdev->open = btusb_open;
- hdev->close = btusb_close;
- hdev->flush = btusb_flush;
- hdev->send = btusb_send_frame;
- hdev->destruct = btusb_destruct;
- hdev->notify = btusb_notify;
+ hdev->open = btusb_open;
+ hdev->close = btusb_close;
+ hdev->flush = btusb_flush;
+ hdev->send = btusb_send_frame;
+ hdev->notify = btusb_notify;
- hdev->owner = THIS_MODULE;
+ if (id->driver_info & BTUSB_BCM92035)
+ hdev->setup = btusb_setup_bcm92035;
+
+ if (id->driver_info & BTUSB_INTEL)
+ hdev->setup = btusb_setup_intel;
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
if (!reset)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
@@ -1074,7 +1450,7 @@
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_CSR) {
@@ -1082,7 +1458,7 @@
/* Old firmware would otherwise execute USB reset */
if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_SNIFFER) {
@@ -1095,23 +1471,11 @@
data->isoc = NULL;
}
- if (id->driver_info & BTUSB_BCM92035) {
- unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
- if (skb) {
- memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
- skb_queue_tail(&hdev->driver_init, skb);
- }
- }
-
if (data->isoc) {
err = usb_driver_claim_interface(&btusb_driver,
data->isoc, data);
if (err < 0) {
hci_free_dev(hdev);
- kfree(data);
return err;
}
}
@@ -1119,7 +1483,6 @@
err = hci_register_dev(hdev);
if (err < 0) {
hci_free_dev(hdev);
- kfree(data);
return err;
}
@@ -1139,9 +1502,6 @@
return;
hdev = data->hdev;
-
- __hci_dev_hold(hdev);
-
usb_set_intfdata(data->intf, NULL);
if (data->isoc)
@@ -1154,8 +1514,6 @@
else if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
- __hci_dev_put(hdev);
-
hci_free_dev(hdev);
}
@@ -1170,7 +1528,7 @@
return 0;
spin_lock_irq(&data->txlock);
- if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
+ if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
set_bit(BTUSB_SUSPENDING, &data->flags);
spin_unlock_irq(&data->txlock);
} else {
@@ -1270,22 +1628,10 @@
#endif
.id_table = btusb_table,
.supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
};
-static int __init btusb_init(void)
-{
- BT_INFO("Generic Bluetooth USB driver ver %s", VERSION);
-
- return usb_register(&btusb_driver);
-}
-
-static void __exit btusb_exit(void)
-{
- usb_deregister(&btusb_driver);
-}
-
-module_init(btusb_init);
-module_exit(btusb_exit);
+module_usb_driver(btusb_driver);
module_param(ignore_dga, bool, 0644);
MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
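
For reference, the firmware patch files consumed by btusb_setup_intel_patching() are a flat stream of records: a 0x01 type byte followed by a little-endian HCI command header and its parameters, then one or more 0x02 type bytes, each followed by an HCI event header and parameters describing an expected response. The sketch below is a minimal userspace walker of that layout (an illustration, not part of the patch); the local struct definitions mirror the kernel's hci_command_hdr and hci_event_hdr only so the example stays self-contained.

    #include <stdint.h>
    #include <string.h>

    struct cmd_hdr { uint16_t opcode; uint8_t plen; } __attribute__((packed));
    struct evt_hdr { uint8_t evt; uint8_t plen; } __attribute__((packed));

    /* Walk one command record plus its expected event(s); returns the number
     * of bytes consumed, or -1 if the stream is truncated or malformed.
     */
    static long parse_record(const uint8_t *p, size_t remain)
    {
        const uint8_t *start = p;
        struct cmd_hdr cmd;
        struct evt_hdr evt;

        if (remain < 1 + sizeof(cmd) || p[0] != 0x01)
            return -1;                          /* record must start with a command */
        p++; remain--;

        memcpy(&cmd, p, sizeof(cmd));           /* opcode is little-endian on disk */
        p += sizeof(cmd); remain -= sizeof(cmd);
        if (remain < cmd.plen)
            return -1;
        p += cmd.plen; remain -= cmd.plen;      /* skip command parameters */

        while (remain > sizeof(evt) && p[0] == 0x02) {
            p++; remain--;
            memcpy(&evt, p, sizeof(evt));
            p += sizeof(evt); remain -= sizeof(evt);
            if (remain < evt.plen)
                return -1;
            p += evt.plen; remain -= evt.plen;  /* expected event parameters */
        }

        return p - start;
    }

The kernel code walks the same layout in place, advancing *fw_ptr across the file one record per call and sending each command with __hci_cmd_sync_ev().
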
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index a3025e2..60abf59 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -22,15 +22,14 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-//#define DEBUG
-#include <linux/module.h>
+#define DEBUG
#include <linux/platform_device.h>
-#include <linux/interrupt.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci.h>
#include <linux/ti_wilink_st.h>
+#include <linux/module.h>
/* Bluetooth Driver Version */
#define VERSION "1.0"
@@ -115,7 +114,7 @@
err = hci_recv_frame(skb);
if (err < 0) {
BT_ERR("Unable to push skb to HCI core(%d)", err);
- return 0;
+ return err;
}
lhst->hdev->stat.byte_rx += skb->len;
@@ -127,6 +126,13 @@
/* protocol structure registered with shared transport */
static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
{
+ .chnl_id = HCI_EVENT_PKT, /* HCI Events */
+ .hdr_len = sizeof(struct hci_event_hdr),
+ .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
+ .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+ .reserve = 8,
+ },
+ {
.chnl_id = HCI_ACLDATA_PKT, /* ACL */
.hdr_len = sizeof(struct hci_acl_hdr),
.offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
@@ -140,13 +146,6 @@
.len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
.reserve = 8,
},
- {
- .chnl_id = HCI_EVENT_PKT, /* HCI Events */
- .hdr_len = sizeof(struct hci_event_hdr),
- .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
- .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
- .reserve = 8,
- },
};
/* Called from HCI core to initialize the device */
@@ -162,7 +161,7 @@
return -EBUSY;
/* provide contexts for callbacks from ST */
- hst = hdev->driver_data;
+ hst = hci_get_drvdata(hdev);
for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
ti_st_proto[i].priv_data = hst;
@@ -237,12 +236,12 @@
static int ti_st_close(struct hci_dev *hdev)
{
int err, i;
- struct ti_st *hst = hdev->driver_data;
+ struct ti_st *hst = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
err = st_unregister(&ti_st_proto[i]);
if (err)
BT_ERR("st_unregister(%d) failed with error %d",
@@ -265,7 +264,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- hst = hdev->driver_data;
+ hst = hci_get_drvdata(hdev);
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -292,47 +291,34 @@
return 0;
}
-static void ti_st_destruct(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
- /* do nothing here, since platform remove
- * would free the hdev->driver_data
- */
-}
-
static int bt_ti_probe(struct platform_device *pdev)
{
static struct ti_st *hst;
struct hci_dev *hdev;
int err;
- hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL);
+ hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
if (!hst)
return -ENOMEM;
/* Expose "hciX" device to user space */
hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(hst);
+ if (!hdev)
return -ENOMEM;
- }
BT_DBG("hdev %p", hdev);
hst->hdev = hdev;
hdev->bus = HCI_UART;
- hdev->driver_data = hst;
+ hci_set_drvdata(hdev, hst);
hdev->open = ti_st_open;
hdev->close = ti_st_close;
hdev->flush = NULL;
hdev->send = ti_st_send_frame;
- hdev->destruct = ti_st_destruct;
- hdev->owner = THIS_MODULE;
err = hci_register_dev(hdev);
if (err < 0) {
BT_ERR("Can't register HCI device error %d", err);
- kfree(hst);
hci_free_dev(hdev);
return err;
}
@@ -358,7 +344,6 @@
hci_unregister_dev(hdev);
hci_free_dev(hdev);
- kfree(hst);
dev_set_drvdata(&pdev->dev, NULL);
return 0;
@@ -373,21 +358,7 @@
},
};
-/* ------- Module Init/Exit interfaces ------ */
-static int __init btwilink_init(void)
-{
- BT_INFO("Bluetooth Driver for TI WiLink - Version %s", VERSION);
-
- return platform_driver_register(&btwilink_driver);
-}
-
-static void __exit btwilink_exit(void)
-{
- platform_driver_unregister(&btwilink_driver);
-}
-
-module_init(btwilink_init);
-module_exit(btwilink_exit);
+module_platform_driver(btwilink_driver);
/* ------ Module Info ------ */
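
The module_platform_driver() conversion above (and the matching module_usb_driver() and module_pcmcia_driver() conversions in btusb.c and dtl1_cs.c) only removes boilerplate: the helper macro generates essentially the same registration code that the hand-written init/exit functions contained. Roughly, simplified from include/linux/platform_device.h with the version banner dropped:

    /* Approximate expansion of module_platform_driver(btwilink_driver) */
    static int __init btwilink_driver_init(void)
    {
        return platform_driver_register(&btwilink_driver);
    }
    module_init(btwilink_driver_init);

    static void __exit btwilink_driver_exit(void)
    {
        platform_driver_unregister(&btwilink_driver);
    }
    module_exit(btwilink_driver_exit);
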
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 26ee0cf..33f3a69 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -38,7 +38,6 @@
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <pcmcia/cistpl.h>
@@ -83,9 +82,6 @@
static int dtl1_config(struct pcmcia_device *link);
-static void dtl1_release(struct pcmcia_device *link);
-
-static void dtl1_detach(struct pcmcia_device *p_dev);
/* Transmit states */
@@ -148,9 +144,9 @@
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -367,7 +363,7 @@
static int dtl1_hci_flush(struct hci_dev *hdev)
{
- dtl1_info_t *info = (dtl1_info_t *)(hdev->driver_data);
+ dtl1_info_t *info = hci_get_drvdata(hdev);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
@@ -399,7 +395,7 @@
return -ENODEV;
}
- info = (dtl1_info_t *)(hdev->driver_data);
+ info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -442,11 +438,6 @@
}
-static void dtl1_hci_destruct(struct hci_dev *hdev)
-{
-}
-
-
static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
@@ -483,18 +474,15 @@
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
- hdev->driver_data = info;
+ hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = dtl1_hci_open;
hdev->close = dtl1_hci_close;
hdev->flush = dtl1_hci_flush;
hdev->send = dtl1_hci_send_frame;
- hdev->destruct = dtl1_hci_destruct;
hdev->ioctl = dtl1_hci_ioctl;
- hdev->owner = THIS_MODULE;
-
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
@@ -551,9 +539,7 @@
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
@@ -564,7 +550,7 @@
dtl1_info_t *info;
/* Create new info device */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -581,9 +567,8 @@
{
dtl1_info_t *info = link->priv;
- dtl1_release(link);
-
- kfree(info);
+ dtl1_close(info);
+ pcmcia_disable_device(link);
}
static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
@@ -600,43 +585,34 @@
static int dtl1_config(struct pcmcia_device *link)
{
dtl1_info_t *info = link->priv;
- int i;
+ int ret;
/* Look for a generic full-sized window */
link->resource[0]->end = 8;
- if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
+ ret = pcmcia_loop_config(link, dtl1_confcheck, NULL);
+ if (ret)
goto failed;
- i = pcmcia_request_irq(link, dtl1_interrupt);
- if (i != 0)
+ ret = pcmcia_request_irq(link, dtl1_interrupt);
+ if (ret)
goto failed;
- i = pcmcia_enable_device(link);
- if (i != 0)
+ ret = pcmcia_enable_device(link);
+ if (ret)
goto failed;
- if (dtl1_open(info) != 0)
+ ret = dtl1_open(info);
+ if (ret)
goto failed;
return 0;
failed:
- dtl1_release(link);
- return -ENODEV;
+ dtl1_detach(link);
+ return ret;
}
-
-static void dtl1_release(struct pcmcia_device *link)
-{
- dtl1_info_t *info = link->priv;
-
- dtl1_close(info);
-
- pcmcia_disable_device(link);
-}
-
-
-static struct pcmcia_device_id dtl1_ids[] = {
+static const struct pcmcia_device_id dtl1_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82),
PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863),
@@ -652,17 +628,4 @@
.remove = dtl1_detach,
.id_table = dtl1_ids,
};
-
-static int __init init_dtl1_cs(void)
-{
- return pcmcia_register_driver(&dtl1_driver);
-}
-
-
-static void __exit exit_dtl1_cs(void)
-{
- pcmcia_unregister_driver(&dtl1_driver);
-}
-
-module_init(init_dtl1_cs);
-module_exit(exit_dtl1_cs);
+module_pcmcia_driver(dtl1_driver);
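
The kzalloc()/kfree() pairs dropped in this file, in btusb.c and in btwilink.c are all replaced with device-managed allocations: memory obtained through devm_kzalloc() is tied to the device and released by the driver core when the device is detached (or when probe fails), which is why the explicit frees on the error and remove paths could go away. A minimal sketch of the pattern, shaped like this driver's attach path (illustrative only):

    static int example_attach(struct pcmcia_device *link)
    {
        dtl1_info_t *info;

        /* Bound to &link->dev: freed automatically on detach or probe failure */
        info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
            return -ENOMEM;

        info->p_dev = link;
        link->priv = info;

        return dtl1_config(link);   /* no kfree() needed on any error path */
    }
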
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 63c3dae..5f1a0cf 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -5,7 +5,7 @@
* power management protocol extension to H4 to support AR300x Bluetooth Chip.
*
* Copyright (c) 2009-2010 Atheros Communications Inc.
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_h4.c, which was written
@@ -38,22 +38,39 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/proc_fs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
+#ifdef CONFIG_SERIAL_MSM_HS
+#include <linux/platform_data/msm_serial_hs.h>
+#endif
-unsigned int enableuartsleep = 1;
-module_param(enableuartsleep, uint, 0644);
+static int enableuartsleep = 1;
+module_param(enableuartsleep, int, 0644);
+MODULE_PARM_DESC(enableuartsleep, "Enable Atheros Sleep Protocol");
+
/*
* Global variables
*/
+
+/** Device table */
+static struct of_device_id bluesleep_match_table[] = {
+ { .compatible = "qca,ar3002_bluesleep" },
+ {}
+};
+
/** Global state flags */
static unsigned long flags;
-/** Tasklet to respond to change in hostwake line */
-static struct tasklet_struct hostwake_task;
+/** To check whether LPM is enabled */
+static bool is_lpm_enabled;
+
+/** Workqueue to respond to change in hostwake line */
+static void wakeup_host_work(struct work_struct *work);
/** Transmission timer */
static void bluesleep_tx_timer_expire(unsigned long data);
@@ -62,6 +79,8 @@
/** Lock for state transitions */
static spinlock_t rw_lock;
+#define PROC_DIR "bluetooth/sleep"
+
#define POLARITY_LOW 0
#define POLARITY_HIGH 1
@@ -70,8 +89,11 @@
unsigned ext_wake; /* wake up device */
unsigned host_wake_irq;
int irq_polarity;
+ struct uart_port *uport;
};
+struct work_struct ws_sleep;
+
/* 1 second timeout */
#define TX_TIMER_INTERVAL 1
@@ -91,9 +113,22 @@
struct work_struct ctxtsw;
};
-static void hostwake_interrupt(unsigned long data)
+static void hsuart_serial_clock_on(struct uart_port *port)
{
- BT_INFO(" wakeup host\n");
+ BT_DBG("");
+ if (port)
+ msm_hs_request_clock_on(port);
+ else
+ BT_INFO("Uart has not voted for Clock ON");
+}
+
+static void hsuart_serial_clock_off(struct uart_port *port)
+{
+ BT_DBG("");
+ if (port)
+ msm_hs_request_clock_off(port);
+ else
+ BT_INFO("Uart has not voted for Clock OFF");
}
static void modify_timer_task(void)
@@ -105,34 +140,46 @@
}
-static int ath_wakeup_ar3k(struct tty_struct *tty)
+static int ath_wakeup_ar3k(void)
{
int status = 0;
if (test_bit(BT_TXEXPIRED, &flags)) {
- BT_INFO("wakeup device\n");
+ hsuart_serial_clock_on(bsi->uport);
+ BT_DBG("wakeup device\n");
gpio_set_value(bsi->ext_wake, 0);
msleep(20);
gpio_set_value(bsi->ext_wake, 1);
}
- modify_timer_task();
+ if (!is_lpm_enabled)
+ modify_timer_task();
return status;
}
+static void wakeup_host_work(struct work_struct *work)
+{
+
+ BT_DBG("wake up host");
+ if (test_bit(BT_SLEEPENABLE, &flags)) {
+ if (test_bit(BT_TXEXPIRED, &flags))
+ hsuart_serial_clock_on(bsi->uport);
+ }
+ if (!is_lpm_enabled)
+ modify_timer_task();
+}
+
static void ath_hci_uart_work(struct work_struct *work)
{
int status;
struct ath_struct *ath;
struct hci_uart *hu;
- struct tty_struct *tty;
ath = container_of(work, struct ath_struct, ctxtsw);
hu = ath->hu;
- tty = hu->tty;
/* verify and wake up controller */
if (test_bit(BT_SLEEPENABLE, &flags))
- status = ath_wakeup_ar3k(tty);
+ status = ath_wakeup_ar3k();
/* Ready to send Data */
clear_bit(HCI_UART_SENDING, &hu->tx_state);
hci_uart_tx_wakeup(hu);
@@ -140,8 +187,11 @@
static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
{
- /* schedule a tasklet to handle the change in the host wake line */
- tasklet_schedule(&hostwake_task);
+ /* schedule a work to global shared workqueue to handle
+ * the change in the host wake line
+ */
+ schedule_work(&ws_sleep);
+
return IRQ_HANDLED;
}
@@ -195,9 +245,6 @@
tx_timer.function = bluesleep_tx_timer_expire;
tx_timer.data = 0;
- /* initialize host wake tasklet */
- tasklet_init(&hostwake_task, hostwake_interrupt, 0);
-
if (bsi->irq_polarity == POLARITY_LOW) {
ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
@@ -232,29 +279,81 @@
return ret;
}
-/* Initialize protocol */
-static int ath_open(struct hci_uart *hu)
+static int ath_lpm_start(void)
{
- struct ath_struct *ath;
+ BT_DBG("Start LPM mode");
- BT_DBG("hu %p, bsi %p", hu, bsi);
-
- if (!bsi)
+ if (!bsi) {
+ BT_ERR("HCIATH3K bluesleep info does not exist");
return -EIO;
+ }
+
+ bsi->uport = msm_hs_get_uart_port(0);
+ if (!bsi->uport) {
+ BT_ERR("UART Port is not available");
+ return -ENODEV;
+ }
+
+ INIT_WORK(&ws_sleep, wakeup_host_work);
if (ath_bluesleep_gpio_config(1) < 0) {
BT_ERR("HCIATH3K GPIO Config failed");
return -EIO;
}
+ return 0;
+}
+
+static int ath_lpm_stop(void)
+{
+ BT_DBG("Stop LPM mode");
+ cancel_work_sync(&ws_sleep);
+
+ if (bsi) {
+ bsi->uport = NULL;
+ ath_bluesleep_gpio_config(0);
+ }
+
+ return 0;
+}
+
+/* Initialize protocol */
+static int ath_open(struct hci_uart *hu)
+{
+ struct ath_struct *ath;
+ struct uart_state *state;
+
+ BT_DBG("hu %p, bsi %p", hu, bsi);
+
+ if (!bsi) {
+ BT_ERR("HCIATH3K bluesleep info does not exist");
+ return -EIO;
+ }
+
ath = kzalloc(sizeof(*ath), GFP_ATOMIC);
- if (!ath)
+ if (!ath) {
+ BT_ERR("HCIATH3K Memory not enough to init driver");
return -ENOMEM;
+ }
skb_queue_head_init(&ath->txq);
hu->priv = ath;
ath->hu = hu;
+ state = hu->tty->driver_data;
+
+ if (!state) {
+ BT_ERR("HCIATH3K tty driver data does not exist");
+ return -ENXIO;
+ }
+ bsi->uport = state->uart_port;
+
+ if (ath_bluesleep_gpio_config(1) < 0) {
+ BT_ERR("HCIATH3K GPIO Config failed");
+ hu->priv = NULL;
+ kfree(ath);
+ return -EIO;
+ }
ath->cur_sleep = enableuartsleep;
if (ath->cur_sleep == 1) {
@@ -262,7 +361,7 @@
modify_timer_task();
}
INIT_WORK(&ath->ctxtsw, ath_hci_uart_work);
-
+ INIT_WORK(&ws_sleep, wakeup_host_work);
return 0;
}
@@ -289,12 +388,15 @@
cancel_work_sync(&ath->ctxtsw);
- hu->priv = NULL;
- kfree(ath);
+ cancel_work_sync(&ws_sleep);
if (bsi)
ath_bluesleep_gpio_config(0);
+ hu->priv = NULL;
+ bsi->uport = NULL;
+ kfree(ath);
+
return 0;
}
@@ -383,11 +485,13 @@
static void bluesleep_tx_timer_expire(unsigned long data)
{
+
if (!test_bit(BT_SLEEPENABLE, &flags))
return;
BT_INFO("Tx timer expired\n");
set_bit(BT_TXEXPIRED, &flags);
+ hsuart_serial_clock_off(bsi->uport);
}
static struct hci_uart_proto athp = {
@@ -400,36 +504,166 @@
.flush = ath_flush,
};
-static int __init bluesleep_probe(struct platform_device *pdev)
+static int lpm_enabled;
+
+static int bluesleep_lpm_set(const char *val, const struct kernel_param *kp)
{
int ret;
+
+ ret = param_set_int(val, kp);
+
+ if (ret) {
+ BT_ERR("HCIATH3K: lpm enable parameter set failed");
+ return ret;
+ }
+
+ BT_DBG("lpm : %d", lpm_enabled);
+
+ if ((lpm_enabled == 0) && is_lpm_enabled) {
+ ath_lpm_stop();
+ clear_bit(BT_SLEEPENABLE, &flags);
+ is_lpm_enabled = false;
+ } else if ((lpm_enabled == 1) && !is_lpm_enabled) {
+ if (ath_lpm_start() < 0) {
+ BT_ERR("HCIATH3K LPM mode failed");
+ return -EIO;
+ }
+ set_bit(BT_SLEEPENABLE, &flags);
+ is_lpm_enabled = true;
+ } else {
+ BT_ERR("HCIATH3K invalid lpm value");
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+static struct kernel_param_ops bluesleep_lpm_ops = {
+ .set = bluesleep_lpm_set,
+ .get = param_get_int,
+};
+
+module_param_cb(ath_lpm, &bluesleep_lpm_ops,
+ &lpm_enabled, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ath_lpm, "Enable Atheros LPM sleep Protocol");
+
+static int lpm_btwrite;
+
+static int bluesleep_lpm_btwrite(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+
+ if (ret) {
+ BT_ERR("HCIATH3K: lpm btwrite parameter set failed");
+ return ret;
+ }
+
+ BT_DBG("btwrite : %d", lpm_btwrite);
+ if (is_lpm_enabled) {
+ if (lpm_btwrite == 0) {
+ /* Setting TXEXPIRED bit to make it
+ * compatible with current solution */
+ set_bit(BT_TXEXPIRED, &flags);
+ hsuart_serial_clock_off(bsi->uport);
+ } else if (lpm_btwrite == 1) {
+ ath_wakeup_ar3k();
+ clear_bit(BT_TXEXPIRED, &flags);
+ } else {
+ BT_ERR("HCIATH3K invalid btwrite value");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static struct kernel_param_ops bluesleep_lpm_btwrite_ops = {
+ .set = bluesleep_lpm_btwrite,
+ .get = param_get_int,
+};
+
+module_param_cb(ath_btwrite, &bluesleep_lpm_btwrite_ops,
+ &lpm_btwrite, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ath_lpm, "Assert/Deassert the sleep");
+
+static int bluesleep_populate_dt_pinfo(struct platform_device *pdev)
+{
+ BT_DBG("");
+
+ if (!bsi)
+ return -ENOMEM;
+
+ bsi->host_wake = of_get_named_gpio(pdev->dev.of_node,
+ "host-wake-gpio", 0);
+ if (bsi->host_wake < 0) {
+ BT_ERR("couldn't find host_wake gpio\n");
+ return -ENODEV;
+ }
+
+ bsi->ext_wake = of_get_named_gpio(pdev->dev.of_node,
+ "ext-wake-gpio", 0);
+ if (bsi->ext_wake < 0) {
+ BT_ERR("couldn't find ext_wake gpio\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int bluesleep_populate_pinfo(struct platform_device *pdev)
+{
struct resource *res;
BT_DBG("");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_host_wake");
+ if (!res) {
+ BT_ERR("couldn't find host_wake gpio\n");
+ return -ENODEV;
+ }
+ bsi->host_wake = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_ext_wake");
+ if (!res) {
+ BT_ERR("couldn't find ext_wake gpio\n");
+ return -ENODEV;
+ }
+ bsi->ext_wake = res->start;
+
+ return 0;
+}
+
+static int bluesleep_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ BT_DBG("");
+
bsi = kzalloc(sizeof(struct bluesleep_info), GFP_KERNEL);
if (!bsi) {
ret = -ENOMEM;
goto failed;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IO,
- "gpio_host_wake");
- if (!res) {
- BT_ERR("couldn't find host_wake gpio\n");
- ret = -ENODEV;
- goto free_bsi;
+ if (pdev->dev.of_node) {
+ ret = bluesleep_populate_dt_pinfo(pdev);
+ if (ret < 0) {
+ BT_ERR("Failed to populate device tree info");
+ goto free_bsi;
+ }
+ } else {
+ ret = bluesleep_populate_pinfo(pdev);
+ if (ret < 0) {
+ BT_ERR("Failed to populate device info");
+ goto free_bsi;
+ }
}
- bsi->host_wake = res->start;
- res = platform_get_resource_byname(pdev, IORESOURCE_IO,
- "gpio_ext_wake");
- if (!res) {
- BT_ERR("couldn't find ext_wake gpio\n");
- ret = -ENODEV;
- goto free_bsi;
- }
- bsi->ext_wake = res->start;
+ BT_DBG("host_wake_gpio: %d ext_wake_gpio: %d",
+ bsi->host_wake, bsi->ext_wake);
bsi->host_wake_irq = platform_get_irq_byname(pdev, "host_wake");
if (bsi->host_wake_irq < 0) {
@@ -456,10 +690,12 @@
}
static struct platform_driver bluesleep_driver = {
+ .probe = bluesleep_probe,
.remove = bluesleep_remove,
.driver = {
.name = "bluesleep",
.owner = THIS_MODULE,
+ .of_match_table = bluesleep_match_table,
},
};
@@ -475,14 +711,19 @@
BT_ERR("HCIATH3K protocol registration failed");
return ret;
}
- ret = platform_driver_probe(&bluesleep_driver, bluesleep_probe);
- if (ret)
+
+ ret = platform_driver_register(&bluesleep_driver);
+ if (ret) {
+ BT_ERR("Failed to register bluesleep driver");
return ret;
+ }
+
return 0;
}
int __exit ath_deinit(void)
{
platform_driver_unregister(&bluesleep_driver);
+
return hci_uart_unregister_proto(&athp);
}
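
The two module_param_cb() hooks added above are the userspace control surface for LPM: writing 1 or 0 to ath_lpm enables or disables low-power mode handling, and ath_btwrite asserts or deasserts the sleep state (pulsing ext_wake and voting the UART clock on or off). Assuming hci_ath.c is built into the hci_uart module, the parameters should appear under /sys/module/hci_uart/parameters/; that path is an assumption and depends on how the driver is actually built. A minimal userspace sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Assumed location; adjust if hci_ath.c is built into a different module */
    #define PARAM_DIR "/sys/module/hci_uart/parameters/"

    static int write_param(const char *name, const char *val)
    {
        char path[128];
        int fd, ok;

        snprintf(path, sizeof(path), PARAM_DIR "%s", name);
        fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;
        ok = (write(fd, val, strlen(val)) == (ssize_t)strlen(val)) ? 0 : -1;
        close(fd);
        return ok;
    }

    int main(void)
    {
        write_param("ath_lpm", "1");      /* enable LPM handling           */
        write_param("ath_btwrite", "1");  /* wake the chip before sending  */
        write_param("ath_btwrite", "0");  /* let the UART clock drop again */
        return 0;
    }
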
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 8eb7c03..57e502e 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -49,8 +49,8 @@
#define VERSION "0.3"
-static bool txcrc = true;
-static bool hciextn = true;
+static bool txcrc = 1;
+static bool hciextn = 1;
#define BCSP_TXWINSIZE 4
@@ -552,7 +552,7 @@
static int bcsp_recv(struct hci_uart *hu, void *data, int count)
{
struct bcsp_struct *bcsp = hu->priv;
- register unsigned char *ptr;
+ unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
@@ -692,7 +692,7 @@
BT_DBG("hu %p", hu);
- bcsp = kzalloc(sizeof(*bcsp), GFP_ATOMIC);
+ bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL);
if (!bcsp)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 2fcd8b3..8ae9f1e 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -69,7 +69,7 @@
BT_DBG("hu %p", hu);
- h4 = kzalloc(sizeof(*h4), GFP_ATOMIC);
+ h4 = kzalloc(sizeof(*h4), GFP_KERNEL);
if (!h4)
return -ENOMEM;
@@ -126,7 +126,7 @@
static inline int h4_check_data_len(struct h4_struct *h4, int len)
{
- register int room = skb_tailroom(h4->rx_skb);
+ int room = skb_tailroom(h4->rx_skb);
BT_DBG("len %d room %d", len, room);
@@ -153,6 +153,9 @@
{
int ret;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
ret = hci_recv_stream_fragment(hu->hdev, data, count);
if (ret < 0) {
BT_ERR("Frame Reassembly Failed");
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
new file mode 100644
index 0000000..b6154d5
--- /dev/null
+++ b/drivers/bluetooth/hci_h5.c
@@ -0,0 +1,747 @@
+/*
+ *
+ * Bluetooth HCI Three-wire UART driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define HCI_3WIRE_ACK_PKT 0
+#define HCI_3WIRE_LINK_PKT 15
+
+/* Sliding window size */
+#define H5_TX_WIN_MAX 4
+
+#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
+#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)
+
+/*
+ * Maximum Three-wire packet:
+ * 4 byte header + max value for 12-bit length + 2 bytes for CRC
+ */
+#define H5_MAX_LEN (4 + 0xfff + 2)
+
+/* Convenience macros for reading Three-wire header values */
+#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
+#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
+#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
+#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
+#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
+#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))
+
+#define SLIP_DELIMITER 0xc0
+#define SLIP_ESC 0xdb
+#define SLIP_ESC_DELIM 0xdc
+#define SLIP_ESC_ESC 0xdd
+
+/* H5 state flags */
+enum {
+ H5_RX_ESC, /* SLIP escape mode */
+ H5_TX_ACK_REQ, /* Pending ack to send */
+};
+
+struct h5 {
+ struct sk_buff_head unack; /* Unack'ed packets queue */
+ struct sk_buff_head rel; /* Reliable packets queue */
+ struct sk_buff_head unrel; /* Unreliable packets queue */
+
+ unsigned long flags;
+
+ struct sk_buff *rx_skb; /* Receive buffer */
+ size_t rx_pending; /* Expecting more bytes */
+ u8 rx_ack; /* Last ack number received */
+
+ int (*rx_func) (struct hci_uart *hu, u8 c);
+
+ struct timer_list timer; /* Retransmission timer */
+
+ u8 tx_seq; /* Next seq number to send */
+ u8 tx_ack; /* Next ack number to send */
+ u8 tx_win; /* Sliding window size */
+
+ enum {
+ H5_UNINITIALIZED,
+ H5_INITIALIZED,
+ H5_ACTIVE,
+ } state;
+
+ enum {
+ H5_AWAKE,
+ H5_SLEEPING,
+ H5_WAKING_UP,
+ } sleep;
+};
+
+static void h5_reset_rx(struct h5 *h5);
+
+static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
+{
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *nskb;
+
+ nskb = alloc_skb(3, GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;
+
+ memcpy(skb_put(nskb, len), data, len);
+
+ skb_queue_tail(&h5->unrel, nskb);
+}
+
+static u8 h5_cfg_field(struct h5 *h5)
+{
+ u8 field = 0;
+
+ /* Sliding window size (first 3 bits) */
+ field |= (h5->tx_win & 7);
+
+ return field;
+}
+
+static void h5_timed_event(unsigned long arg)
+{
+ const unsigned char sync_req[] = { 0x01, 0x7e };
+ unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
+ struct hci_uart *hu = (struct hci_uart *) arg;
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("%s", hu->hdev->name);
+
+ if (h5->state == H5_UNINITIALIZED)
+ h5_link_control(hu, sync_req, sizeof(sync_req));
+
+ if (h5->state == H5_INITIALIZED) {
+ conf_req[2] = h5_cfg_field(h5);
+ h5_link_control(hu, conf_req, sizeof(conf_req));
+ }
+
+ if (h5->state != H5_ACTIVE) {
+ mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+ goto wakeup;
+ }
+
+ if (h5->sleep != H5_AWAKE) {
+ h5->sleep = H5_SLEEPING;
+ goto wakeup;
+ }
+
+ BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
+
+ spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
+
+ while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
+ h5->tx_seq = (h5->tx_seq - 1) & 0x07;
+ skb_queue_head(&h5->rel, skb);
+ }
+
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+
+wakeup:
+ hci_uart_tx_wakeup(hu);
+}
+
+static int h5_open(struct hci_uart *hu)
+{
+ struct h5 *h5;
+ const unsigned char sync[] = { 0x01, 0x7e };
+
+ BT_DBG("hu %p", hu);
+
+ h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+
+ hu->priv = h5;
+
+ skb_queue_head_init(&h5->unack);
+ skb_queue_head_init(&h5->rel);
+ skb_queue_head_init(&h5->unrel);
+
+ h5_reset_rx(h5);
+
+ init_timer(&h5->timer);
+ h5->timer.function = h5_timed_event;
+ h5->timer.data = (unsigned long) hu;
+
+ h5->tx_win = H5_TX_WIN_MAX;
+
+ set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
+
+ /* Send initial sync request */
+ h5_link_control(hu, sync, sizeof(sync));
+ mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+
+ return 0;
+}
+
+static int h5_close(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ skb_queue_purge(&h5->unack);
+ skb_queue_purge(&h5->rel);
+ skb_queue_purge(&h5->unrel);
+
+ del_timer(&h5->timer);
+
+ kfree(h5);
+
+ return 0;
+}
+
+static void h5_pkt_cull(struct h5 *h5)
+{
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i, to_remove;
+ u8 seq;
+
+ spin_lock_irqsave(&h5->unack.lock, flags);
+
+ to_remove = skb_queue_len(&h5->unack);
+ if (to_remove == 0)
+ goto unlock;
+
+ seq = h5->tx_seq;
+
+ while (to_remove > 0) {
+ if (h5->rx_ack == seq)
+ break;
+
+ to_remove--;
+ seq = (seq - 1) % 8;
+ }
+
+ if (seq != h5->rx_ack)
+ BT_ERR("Controller acked invalid packet");
+
+ i = 0;
+ skb_queue_walk_safe(&h5->unack, skb, tmp) {
+ if (i++ >= to_remove)
+ break;
+
+ __skb_unlink(skb, &h5->unack);
+ kfree_skb(skb);
+ }
+
+ if (skb_queue_empty(&h5->unack))
+ del_timer(&h5->timer);
+
+unlock:
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+}
+
+static void h5_handle_internal_rx(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char sync_req[] = { 0x01, 0x7e };
+ const unsigned char sync_rsp[] = { 0x02, 0x7d };
+ unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
+ const unsigned char conf_rsp[] = { 0x04, 0x7b };
+ const unsigned char wakeup_req[] = { 0x05, 0xfa };
+ const unsigned char woken_req[] = { 0x06, 0xf9 };
+ const unsigned char sleep_req[] = { 0x07, 0x78 };
+ const unsigned char *hdr = h5->rx_skb->data;
+ const unsigned char *data = &h5->rx_skb->data[4];
+
+ BT_DBG("%s", hu->hdev->name);
+
+ if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
+ return;
+
+ if (H5_HDR_LEN(hdr) < 2)
+ return;
+
+ conf_req[2] = h5_cfg_field(h5);
+
+ if (memcmp(data, sync_req, 2) == 0) {
+ h5_link_control(hu, sync_rsp, 2);
+ } else if (memcmp(data, sync_rsp, 2) == 0) {
+ h5->state = H5_INITIALIZED;
+ h5_link_control(hu, conf_req, 3);
+ } else if (memcmp(data, conf_req, 2) == 0) {
+ h5_link_control(hu, conf_rsp, 2);
+ h5_link_control(hu, conf_req, 3);
+ } else if (memcmp(data, conf_rsp, 2) == 0) {
+ if (H5_HDR_LEN(hdr) > 2)
+ h5->tx_win = (data[2] & 7);
+ BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
+ h5->state = H5_ACTIVE;
+ hci_uart_init_ready(hu);
+ return;
+ } else if (memcmp(data, sleep_req, 2) == 0) {
+ BT_DBG("Peer went to sleep");
+ h5->sleep = H5_SLEEPING;
+ return;
+ } else if (memcmp(data, woken_req, 2) == 0) {
+ BT_DBG("Peer woke up");
+ h5->sleep = H5_AWAKE;
+ } else if (memcmp(data, wakeup_req, 2) == 0) {
+ BT_DBG("Peer requested wakeup");
+ h5_link_control(hu, woken_req, 2);
+ h5->sleep = H5_AWAKE;
+ } else {
+ BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
+ return;
+ }
+
+ hci_uart_tx_wakeup(hu);
+}
+
+static void h5_complete_rx_pkt(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ if (H5_HDR_RELIABLE(hdr)) {
+ h5->tx_ack = (h5->tx_ack + 1) % 8;
+ set_bit(H5_TX_ACK_REQ, &h5->flags);
+ hci_uart_tx_wakeup(hu);
+ }
+
+ h5->rx_ack = H5_HDR_ACK(hdr);
+
+ h5_pkt_cull(h5);
+
+ switch (H5_HDR_PKT_TYPE(hdr)) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);
+
+ /* Remove Three-wire header */
+ skb_pull(h5->rx_skb, 4);
+
+ hci_recv_frame(h5->rx_skb);
+ h5->rx_skb = NULL;
+
+ break;
+
+ default:
+ h5_handle_internal_rx(hu);
+ break;
+ }
+
+ h5_reset_rx(h5);
+}
+
+static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ h5_complete_rx_pkt(hu);
+ h5_reset_rx(h5);
+
+ return 0;
+}
+
+static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ if (H5_HDR_CRC(hdr)) {
+ h5->rx_func = h5_rx_crc;
+ h5->rx_pending = 2;
+ } else {
+ h5_complete_rx_pkt(hu);
+ h5_reset_rx(h5);
+ }
+
+ return 0;
+}
+
+static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
+ hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
+ H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
+ H5_HDR_LEN(hdr));
+
+ if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
+ BT_ERR("Invalid header checksum");
+ h5_reset_rx(h5);
+ return 0;
+ }
+
+ if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
+ BT_ERR("Out-of-order packet arrived (%u != %u)",
+ H5_HDR_SEQ(hdr), h5->tx_ack);
+ h5_reset_rx(h5);
+ return 0;
+ }
+
+ if (h5->state != H5_ACTIVE &&
+ H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
+ BT_ERR("Non-link packet received in non-active state");
+ h5_reset_rx(h5);
+ }
+
+ h5->rx_func = h5_rx_payload;
+ h5->rx_pending = H5_HDR_LEN(hdr);
+
+ return 0;
+}
+
+static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (c == SLIP_DELIMITER)
+ return 1;
+
+ h5->rx_func = h5_rx_3wire_hdr;
+ h5->rx_pending = 4;
+
+ h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
+ if (!h5->rx_skb) {
+ BT_ERR("Can't allocate mem for new packet");
+ h5_reset_rx(h5);
+ return -ENOMEM;
+ }
+
+ h5->rx_skb->dev = (void *) hu->hdev;
+
+ return 0;
+}
+
+static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (c == SLIP_DELIMITER)
+ h5->rx_func = h5_rx_pkt_start;
+
+ return 1;
+}
+
+static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
+{
+ const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
+ const u8 *byte = &c;
+
+ if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
+ set_bit(H5_RX_ESC, &h5->flags);
+ return;
+ }
+
+ if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
+ switch (c) {
+ case SLIP_ESC_DELIM:
+ byte = &delim;
+ break;
+ case SLIP_ESC_ESC:
+ byte = &esc;
+ break;
+ default:
+ BT_ERR("Invalid esc byte 0x%02hhx", c);
+ h5_reset_rx(h5);
+ return;
+ }
+ }
+
+ memcpy(skb_put(h5->rx_skb, 1), byte, 1);
+ h5->rx_pending--;
+
+ BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
+}
+
+static void h5_reset_rx(struct h5 *h5)
+{
+ if (h5->rx_skb) {
+ kfree_skb(h5->rx_skb);
+ h5->rx_skb = NULL;
+ }
+
+ h5->rx_func = h5_rx_delimiter;
+ h5->rx_pending = 0;
+ clear_bit(H5_RX_ESC, &h5->flags);
+}
+
+static int h5_recv(struct hci_uart *hu, void *data, int count)
+{
+ struct h5 *h5 = hu->priv;
+ unsigned char *ptr = data;
+
+ BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
+ count);
+
+ while (count > 0) {
+ int processed;
+
+ if (h5->rx_pending > 0) {
+ if (*ptr == SLIP_DELIMITER) {
+ BT_ERR("Too short H5 packet");
+ h5_reset_rx(h5);
+ continue;
+ }
+
+ h5_unslip_one_byte(h5, *ptr);
+
+ ptr++; count--;
+ continue;
+ }
+
+ processed = h5->rx_func(hu, *ptr);
+ if (processed < 0)
+ return processed;
+
+ ptr += processed;
+ count -= processed;
+ }
+
+ return 0;
+}
+
+static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (skb->len > 0xfff) {
+ BT_ERR("Packet too long (%u bytes)", skb->len);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (h5->state != H5_ACTIVE) {
+ BT_ERR("Ignoring HCI data in non-active state");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_COMMAND_PKT:
+ skb_queue_tail(&h5->rel, skb);
+ break;
+
+ case HCI_SCODATA_PKT:
+ skb_queue_tail(&h5->unrel, skb);
+ break;
+
+ default:
+ BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
+ kfree_skb(skb);
+ break;
+ }
+
+ return 0;
+}
+
+static void h5_slip_delim(struct sk_buff *skb)
+{
+ const char delim = SLIP_DELIMITER;
+
+ memcpy(skb_put(skb, 1), &delim, 1);
+}
+
+static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
+{
+ const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
+ const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
+
+ switch (c) {
+ case SLIP_DELIMITER:
+ memcpy(skb_put(skb, 2), &esc_delim, 2);
+ break;
+ case SLIP_ESC:
+ memcpy(skb_put(skb, 2), &esc_esc, 2);
+ break;
+ default:
+ memcpy(skb_put(skb, 1), &c, 1);
+ }
+}
+
+static bool valid_packet_type(u8 type)
+{
+ switch (type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_COMMAND_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_3WIRE_LINK_PKT:
+ case HCI_3WIRE_ACK_PKT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
+ const u8 *data, size_t len)
+{
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *nskb;
+ u8 hdr[4];
+ int i;
+
+ if (!valid_packet_type(pkt_type)) {
+ BT_ERR("Unknown packet type %u", pkt_type);
+ return NULL;
+ }
+
+ /*
+ * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
+ * (because bytes 0xc0 and 0xdb are escaped, worst case is when
+ * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
+ * delimiters at start and end).
+ */
+ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ bt_cb(nskb)->pkt_type = pkt_type;
+
+ h5_slip_delim(nskb);
+
+ hdr[0] = h5->tx_ack << 3;
+ clear_bit(H5_TX_ACK_REQ, &h5->flags);
+
+ /* Reliable packet? */
+ if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
+ hdr[0] |= 1 << 7;
+ hdr[0] |= h5->tx_seq;
+ h5->tx_seq = (h5->tx_seq + 1) % 8;
+ }
+
+ hdr[1] = pkt_type | ((len & 0x0f) << 4);
+ hdr[2] = len >> 4;
+ hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);
+
+ BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
+ hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
+ H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
+ H5_HDR_LEN(hdr));
+
+ for (i = 0; i < 4; i++)
+ h5_slip_one_byte(nskb, hdr[i]);
+
+ for (i = 0; i < len; i++)
+ h5_slip_one_byte(nskb, data[i]);
+
+ h5_slip_delim(nskb);
+
+ return nskb;
+}
+
+static struct sk_buff *h5_dequeue(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ unsigned long flags;
+ struct sk_buff *skb, *nskb;
+
+ if (h5->sleep != H5_AWAKE) {
+ const unsigned char wakeup_req[] = { 0x05, 0xfa };
+
+ if (h5->sleep == H5_WAKING_UP)
+ return NULL;
+
+ h5->sleep = H5_WAKING_UP;
+ BT_DBG("Sending wakeup request");
+
+ mod_timer(&h5->timer, jiffies + HZ / 100);
+ return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
+ }
+
+ if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
+ nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
+ skb->data, skb->len);
+ if (nskb) {
+ kfree_skb(skb);
+ return nskb;
+ }
+
+ skb_queue_head(&h5->unrel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
+
+ spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
+
+ if (h5->unack.qlen >= h5->tx_win)
+ goto unlock;
+
+ if ((skb = skb_dequeue(&h5->rel)) != NULL) {
+ nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
+ skb->data, skb->len);
+ if (nskb) {
+ __skb_queue_tail(&h5->unack, skb);
+ mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+ return nskb;
+ }
+
+ skb_queue_head(&h5->rel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
+
+unlock:
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+
+ if (test_bit(H5_TX_ACK_REQ, &h5->flags))
+ return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);
+
+ return NULL;
+}
+
+static int h5_flush(struct hci_uart *hu)
+{
+ BT_DBG("hu %p", hu);
+ return 0;
+}
+
+static struct hci_uart_proto h5p = {
+ .id = HCI_UART_3WIRE,
+ .open = h5_open,
+ .close = h5_close,
+ .recv = h5_recv,
+ .enqueue = h5_enqueue,
+ .dequeue = h5_dequeue,
+ .flush = h5_flush,
+};
+
+int __init h5_init(void)
+{
+ int err = hci_uart_register_proto(&h5p);
+
+ if (!err)
+ BT_INFO("HCI Three-wire UART (H5) protocol initialized");
+ else
+ BT_ERR("HCI Three-wire UART (H5) protocol init failed");
+
+ return err;
+}
+
+int __exit h5_deinit(void)
+{
+ return hci_uart_unregister_proto(&h5p);
+}
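
To make the new transport easier to follow: every H5 frame carries the 4-byte header decoded by the H5_HDR_* macros (sequence and acknowledgement numbers, a data-CRC flag, a reliable flag, the packet type, a 12-bit length, and a checksum byte chosen so the four header bytes sum to 0xff), and the whole frame is then SLIP-framed with 0xc0 delimiters, escaping 0xc0 as 0xdb 0xdc and 0xdb as 0xdb 0xdd. The standalone sketch below builds such a header and SLIP-encodes it the same way h5_prepare_pkt() and h5_slip_one_byte() do; it is an illustration, not part of the driver.

    #include <stdint.h>
    #include <stdio.h>

    /* Build the 4-byte H5 header: matches the H5_HDR_* bit layout above. */
    static void h5_build_hdr(uint8_t hdr[4], int seq, int ack, int reliable,
                             int pkt_type, unsigned int len)
    {
        hdr[0] = (seq & 0x07) | ((ack & 0x07) << 3) | (!!reliable << 7);
        hdr[1] = (pkt_type & 0x0f) | ((len & 0x0f) << 4);
        hdr[2] = len >> 4;
        hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);  /* four bytes sum to 0xff */
    }

    /* SLIP-escape one byte into out[]; returns bytes written (1 or 2). */
    static int slip_byte(uint8_t *out, uint8_t c)
    {
        if (c == 0xc0) { out[0] = 0xdb; out[1] = 0xdc; return 2; }
        if (c == 0xdb) { out[0] = 0xdb; out[1] = 0xdd; return 2; }
        out[0] = c;
        return 1;
    }

    int main(void)
    {
        /* Reliable ACL packet (type 0x02), seq 2, ack 5, 10-byte payload */
        uint8_t hdr[4], frame[16];
        int i, n = 0;

        h5_build_hdr(hdr, 2, 5, 1, 0x02, 10);   /* -> aa a2 00 b3 */

        frame[n++] = 0xc0;                      /* opening delimiter */
        for (i = 0; i < 4; i++)
            n += slip_byte(&frame[n], hdr[i]);
        /* payload bytes would be escaped here the same way */
        frame[n++] = 0xc0;                      /* closing delimiter */

        for (i = 0; i < n; i++)
            printf("%02x ", frame[i]);
        printf("\n");
        return 0;
    }
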
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f094df3..bc68a44 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -2,9 +2,9 @@
*
* Bluetooth HCI UART driver
*
+ * Copyright (C) 2000-2001 Qualcomm Incorporated
* Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
* Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
- * Copyright (c) 2000-2001, 2010-2012, The Linux Foundation. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
@@ -48,10 +48,7 @@
#define VERSION "2.2"
-static bool reset = 0;
-
static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
-static void hci_uart_tty_wakeup_action(unsigned long data);
int hci_uart_register_proto(struct hci_uart_proto *p)
{
@@ -121,6 +118,10 @@
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
+ struct tty_struct *tty = hu->tty;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
return 0;
@@ -128,22 +129,6 @@
BT_DBG("");
- schedule_work(&hu->write_work);
-
- return 0;
-}
-
-static void hci_uart_write_work(struct work_struct *work)
-{
- struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
- struct tty_struct *tty = hu->tty;
- struct hci_dev *hdev = hu->hdev;
- struct sk_buff *skb;
-
- /* REVISIT: should we cope with bad skbs or ->write() returning
- * and error value ?
- */
-
restart:
clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -168,6 +153,36 @@
goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ return 0;
+}
+
+static void hci_uart_init_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, init_ready);
+ int err;
+
+ if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return;
+
+ err = hci_register_dev(hu->hdev);
+ if (err < 0) {
+ BT_ERR("Can't register HCI device");
+ hci_free_dev(hu->hdev);
+ hu->hdev = NULL;
+ hu->proto->close(hu);
+ }
+
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+}
+
+int hci_uart_init_ready(struct hci_uart *hu)
+{
+ if (!test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return -EALREADY;
+
+ schedule_work(&hu->init_ready);
+
+ return 0;
}
/* ------- Interface to HCI layer ------ */
@@ -186,7 +201,7 @@
/* Reset device */
static int hci_uart_flush(struct hci_dev *hdev)
{
- struct hci_uart *hu = (struct hci_uart *) hdev->driver_data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
struct tty_struct *tty = hu->tty;
BT_DBG("hdev %p tty %p", hdev, tty);
@@ -232,7 +247,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- hu = (struct hci_uart *) hdev->driver_data;
+ hu = hci_get_drvdata(hdev);
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
@@ -243,36 +258,22 @@
return 0;
}
-static void hci_uart_destruct(struct hci_dev *hdev)
-{
- if (!hdev)
- return;
-
- BT_DBG("%s", hdev->name);
- kfree(hdev->driver_data);
-}
-
/* ------ LDISC part ------ */
/* hci_uart_tty_open
- *
+ *
* Called when line discipline changed to HCI_UART.
*
* Arguments:
* tty pointer to tty info structure
- * Return Value:
+ * Return Value:
* 0 if success, otherwise error code
*/
static int hci_uart_tty_open(struct tty_struct *tty)
{
- struct hci_uart *hu = (void *) tty->disc_data;
+ struct hci_uart *hu;
BT_DBG("tty %p", tty);
- /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
- the pointer */
- if (hu)
- return -EEXIST;
-
/* Error if the tty has no write op instead of leaving an exploitable
hole */
if (tty->ops->write == NULL)
@@ -287,11 +288,9 @@
hu->tty = tty;
tty->receive_room = 65536;
- INIT_WORK(&hu->write_work, hci_uart_write_work);
+ INIT_WORK(&hu->init_ready, hci_uart_init_work);
spin_lock_init(&hu->rx_lock);
- tasklet_init(&hu->tty_wakeup_task, hci_uart_tty_wakeup_action,
- (unsigned long)hu);
/* Flush any pending characters in the driver and line discipline. */
@@ -313,38 +312,36 @@
static void hci_uart_tty_close(struct tty_struct *tty)
{
struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_dev *hdev;
BT_DBG("tty %p", tty);
/* Detach from the tty */
tty->disc_data = NULL;
- if (hu) {
- struct hci_dev *hdev = hu->hdev;
+ if (!hu)
+ return;
- if (hdev)
- hci_uart_close(hdev);
+ hdev = hu->hdev;
+ if (hdev)
+ hci_uart_close(hdev);
- tasklet_kill(&hu->tty_wakeup_task);
-
- cancel_work_sync(&hu->write_work);
-
- if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
- hu->proto->close(hu);
- if (hdev) {
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (hdev) {
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- }
+ hci_free_dev(hdev);
}
+ hu->proto->close(hu);
}
+
+ kfree(hu);
}
/* hci_uart_tty_wakeup()
*
* Callback for transmit wakeup. Called when low level
* device driver can accept more send data.
- * This callback gets called from the isr context so
- * schedule the send data operation to tasklet.
*
* Arguments: tty pointer to associated tty instance data
* Return Value: None
@@ -352,26 +349,12 @@
static void hci_uart_tty_wakeup(struct tty_struct *tty)
{
struct hci_uart *hu = (void *)tty->disc_data;
- tasklet_schedule(&hu->tty_wakeup_task);
-}
-
-/* hci_uart_tty_wakeup_action()
- *
- * Scheduled action to transmit data when low level device
- * driver can accept more data.
- */
-static void hci_uart_tty_wakeup_action(unsigned long data)
-{
- struct hci_uart *hu = (struct hci_uart *)data;
- struct tty_struct *tty;
BT_DBG("");
if (!hu)
return;
- tty = hu->tty;
-
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (tty != hu->tty)
@@ -382,20 +365,19 @@
}
/* hci_uart_tty_receive()
- *
+ *
* Called by tty low level driver when receive data is
* available.
- *
+ *
* Arguments: tty pointer to tty isntance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes
- *
+ *
* Return Value: None
*/
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
- int ret;
struct hci_uart *hu = (void *)tty->disc_data;
if (!hu || tty != hu->tty)
@@ -405,9 +387,11 @@
return;
spin_lock(&hu->rx_lock);
- ret = hu->proto->recv(hu, (void *) data, count);
- if (ret > 0)
+ hu->proto->recv(hu, (void *) data, count);
+
+ if (hu->hdev)
hu->hdev->stat.byte_rx += count;
+
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
@@ -429,29 +413,36 @@
hu->hdev = hdev;
hdev->bus = HCI_UART;
- hdev->driver_data = hu;
+ hci_set_drvdata(hdev, hu);
hdev->open = hci_uart_open;
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
- hdev->destruct = hci_uart_destruct;
- hdev->parent = hu->tty->dev;
-
- hdev->owner = THIS_MODULE;
-
- if (!reset)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+ hdev->dev_type = HCI_AMP;
+ else
+ hdev->dev_type = HCI_BREDR;
+
+ if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return 0;
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
return -ENODEV;
}
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+
return 0;
}
@@ -506,18 +497,11 @@
switch (cmd) {
case HCIUARTSETPROTO:
- if (!test_and_set_bit(HCI_UART_PROTO_SET_IN_PROGRESS,
- &hu->flags) && !test_bit(HCI_UART_PROTO_SET,
- &hu->flags)) {
+ if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
err = hci_uart_set_proto(hu, arg);
if (err) {
- clear_bit(HCI_UART_PROTO_SET_IN_PROGRESS,
- &hu->flags);
+ clear_bit(HCI_UART_PROTO_SET, &hu->flags);
return err;
- } else {
- set_bit(HCI_UART_PROTO_SET, &hu->flags);
- clear_bit(HCI_UART_PROTO_SET_IN_PROGRESS,
- &hu->flags);
}
} else
return -EBUSY;
@@ -545,7 +529,7 @@
default:
err = n_tty_ioctl_helper(tty, file, cmd, arg);
break;
- };
+ }
return err;
}
@@ -610,8 +594,8 @@
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_init();
#endif
-#ifdef CONFIG_BT_HCIUART_IBS
- ibs_init();
+#ifdef CONFIG_BT_HCIUART_3WIRE
+ h5_init();
#endif
return 0;
@@ -633,8 +617,8 @@
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_deinit();
#endif
-#ifdef CONFIG_BT_HCIUART_IBS
- ibs_deinit();
+#ifdef CONFIG_BT_HCIUART_3WIRE
+ h5_deinit();
#endif
/* Release tty registration of line discipline */
@@ -645,9 +629,6 @@
module_init(hci_uart_init);
module_exit(hci_uart_exit);
-module_param(reset, bool, 0644);
-MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
-
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION);
MODULE_VERSION(VERSION);
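
The new HCI_UART_INIT_PENDING flag and hci_uart_init_ready() let a UART protocol defer hci_register_dev() until its transport link is actually usable; hci_h5.c above is the first user: it sets the flag in its open() callback and signals readiness once the three-wire link reaches the active state, at which point the init_ready work registers the device. A minimal protocol skeleton following that contract (function names here are illustrative, not from the patch):

    /* Sketch of the deferred-registration contract used by hci_h5.c */
    static int example_proto_open(struct hci_uart *hu)
    {
        /* Ask the line discipline to skip hci_register_dev() for now */
        set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

        /* start link establishment here, e.g. send a sync request */
        return 0;
    }

    static void example_proto_link_up(struct hci_uart *hu)
    {
        /* Link is active: schedule the init_ready work, which calls
         * hci_register_dev() and sets HCI_UART_REGISTERED on success.
         */
        hci_uart_init_ready(hu);
    }
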
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 38595e7..cfc7679 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -125,7 +125,7 @@
BT_DBG("hu %p", hu);
- ll = kzalloc(sizeof(*ll), GFP_ATOMIC);
+ ll = kzalloc(sizeof(*ll), GFP_KERNEL);
if (!ll)
return -ENOMEM;
@@ -207,7 +207,7 @@
/*
* This state means that both the host and the BRF chip
* have simultaneously sent a wake-up-indication packet.
- * Traditionaly, in this case, receiving a wake-up-indication
+ * Traditionally, in this case, receiving a wake-up-indication
* was enough and an additional wake-up-ack wasn't needed.
* This has changed with the BRF6350, which does require an
* explicit wake-up-ack. Other BRF versions, which do not
@@ -348,7 +348,7 @@
static inline int ll_check_data_len(struct ll_struct *ll, int len)
{
- register int room = skb_tailroom(ll->rx_skb);
+ int room = skb_tailroom(ll->rx_skb);
BT_DBG("len %d room %d", len, room);
@@ -374,11 +374,11 @@
static int ll_recv(struct hci_uart *hu, void *data, int count)
{
struct ll_struct *ll = hu->priv;
- register char *ptr;
+ char *ptr;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
- register int len, type, dlen;
+ int len, type, dlen;
BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
@@ -481,7 +481,7 @@
hu->hdev->stat.err_rx++;
ptr++; count--;
continue;
- };
+ }
ptr++; count--;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 09f3497..fffa61f 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -2,9 +2,9 @@
*
* Bluetooth HCI UART driver
*
+ * Copyright (C) 2000-2001 Qualcomm Incorporated
* Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
* Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
- * Copyright (c) 2000-2001, 2010, 2012 The Linux Foundation. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
@@ -35,17 +35,19 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 7
+#define HCI_UART_MAX_PROTO 6
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
#define HCI_UART_3WIRE 2
#define HCI_UART_H4DS 3
#define HCI_UART_LL 4
-#define HCI_UART_IBS 5
-#define HCI_UART_ATH3K 6
+#define HCI_UART_ATH3K 5
#define HCI_UART_RAW_DEVICE 0
+#define HCI_UART_RESET_ON_INIT 1
+#define HCI_UART_CREATE_AMP 2
+#define HCI_UART_INIT_PENDING 3
struct hci_uart;
@@ -65,10 +67,9 @@
unsigned long flags;
unsigned long hdev_flags;
- struct work_struct write_work;
+ struct work_struct init_ready;
struct hci_uart_proto *proto;
- struct tasklet_struct tty_wakeup_task;
void *priv;
struct sk_buff *tx_skb;
@@ -77,8 +78,8 @@
};
/* HCI_UART proto flag bits */
-#define HCI_UART_PROTO_SET 0
-#define HCI_UART_PROTO_SET_IN_PROGRESS 1
+#define HCI_UART_PROTO_SET 0
+#define HCI_UART_REGISTERED 1
/* TX states */
#define HCI_UART_SENDING 1
@@ -87,6 +88,7 @@
int hci_uart_register_proto(struct hci_uart_proto *p);
int hci_uart_unregister_proto(struct hci_uart_proto *p);
int hci_uart_tx_wakeup(struct hci_uart *hu);
+int hci_uart_init_ready(struct hci_uart *hu);
#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
@@ -108,7 +110,7 @@
int ath_deinit(void);
#endif
-#ifdef CONFIG_BT_HCIUART_IBS
-int ibs_init(void);
-int ibs_deinit(void);
+#ifdef CONFIG_BT_HCIUART_3WIRE
+int h5_init(void);
+int h5_deinit(void);
#endif
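
For reference, the simplified HCIUARTSETPROTO handling above expects exactly one set-protocol call per tty. Below is a minimal user-space sketch of how a helper along the lines of hciattach would bind a UART to this line discipline; the N_HCI number and the HCIUARTSETPROTO encoding are restated from the kernel headers and are assumptions as far as this patch is concerned.

/* Hypothetical user-space helper: attach a UART to the HCI line discipline
 * and select the plain H4 protocol. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define N_HCI           15                   /* assumed HCI line discipline */
#define HCIUARTSETPROTO _IOW('U', 200, int)  /* assumed, see hci_uart.h */
#define HCI_UART_H4     0                    /* H4 protocol id from above */

int attach_h4(const char *dev)
{
	int ldisc = N_HCI;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0 ||          /* switch tty to N_HCI */
	    ioctl(fd, HCIUARTSETPROTO, HCI_UART_H4) < 0) { /* -EBUSY if already set */
		close(fd);
		return -1;
	}
	return fd;	/* keep open: closing detaches the ldisc and the hdev */
}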
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 67c180c..d8b7aed 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,6 +41,8 @@
#define VERSION "1.3"
+static bool amp;
+
struct vhci_data {
struct hci_dev *hdev;
@@ -59,7 +61,7 @@
static int vhci_close_dev(struct hci_dev *hdev)
{
- struct vhci_data *data = hdev->driver_data;
+ struct vhci_data *data = hci_get_drvdata(hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
@@ -71,7 +73,7 @@
static int vhci_flush(struct hci_dev *hdev)
{
- struct vhci_data *data = hdev->driver_data;
+ struct vhci_data *data = hci_get_drvdata(hdev);
skb_queue_purge(&data->readq);
@@ -91,7 +93,7 @@
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- data = hdev->driver_data;
+ data = hci_get_drvdata(hdev);
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&data->readq, skb);
@@ -101,11 +103,6 @@
return 0;
}
-static void vhci_destruct(struct hci_dev *hdev)
-{
- kfree(hdev->driver_data);
-}
-
static inline ssize_t vhci_get_user(struct vhci_data *data,
const char __user *buf, size_t count)
{
@@ -159,7 +156,7 @@
case HCI_SCODATA_PKT:
data->hdev->stat.sco_tx++;
break;
- };
+ }
return total;
}
@@ -237,15 +234,15 @@
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
- hdev->driver_data = data;
+ hci_set_drvdata(hdev, data);
+
+ if (amp)
+ hdev->dev_type = HCI_AMP;
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
- hdev->destruct = vhci_destruct;
-
- hdev->owner = THIS_MODULE;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
@@ -255,8 +252,9 @@
}
file->private_data = data;
+ nonseekable_open(inode, file);
- return nonseekable_open(inode, file);
+ return 0;
}
static int vhci_release(struct inode *inode, struct file *file)
@@ -264,13 +262,11 @@
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
- if (hci_unregister_dev(hdev) < 0) {
- BT_ERR("Can't unregister HCI device %s", hdev->name);
- }
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
file->private_data = NULL;
+ kfree(data);
return 0;
}
@@ -306,6 +302,9 @@
module_init(vhci_init);
module_exit(vhci_exit);
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
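
A short, hypothetical user-space sketch of exercising the virtual controller with the new amp parameter (modprobe hci_vhci amp=1); the one-byte packet-type prefix on each read matches what vhci_send_frame() pushes above.

/* Hypothetical test: open the virtual controller and dump host->controller
 * HCI traffic. Opening /dev/vhci registers a new hciX device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[260];
	ssize_t n;
	int fd = open("/dev/vhci", O_RDWR);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		printf("host->ctrl type 0x%02x, %zd bytes\n", buf[0], n - 1);
	close(fd);	/* unregisters and frees the hdev */
	return 0;
}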
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6a49e5c..7ca2a20 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1181,6 +1181,22 @@
* device.
*/
+/*
+ * Allocator for buffer that is going to be passed to hid_output_report()
+ */
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+{
+ /*
+ * 7 extra bytes are necessary to achieve proper functionality
+ * of implement() working on 8 byte chunks
+ */
+
+ int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+
+ return kmalloc(len, flags);
+}
+EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
+
int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
unsigned size;
@@ -2238,7 +2254,7 @@
{ }
};
-static bool hid_ignore(struct hid_device *hdev)
+bool hid_ignore(struct hid_device *hdev)
{
switch (hdev->vendor) {
case USB_VENDOR_ID_CODEMERCS:
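
A minimal kernel-side sketch of the pattern hid_alloc_report_buf() is meant for; the transport hand-off is left as a comment since it depends on the bus driver, and build_output_report() is an illustrative name.

/* Sketch: build an output report with the new allocator instead of a
 * hand-sized kmalloc(). */
#include <linux/hid.h>
#include <linux/slab.h>

static int build_output_report(struct hid_report *report)
{
	/* bytes actually produced by hid_output_report(); the 7 extra bytes
	 * the allocator adds are scratch space for implement() only */
	size_t len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	hid_output_report(report, buf);	/* serialize the report fields */
	pr_debug("output report %u is %zu bytes\n", report->id, len);
	/* ... hand (buf, len) to the transport (USB, I2C, Bluetooth) ... */

	kfree(buf);
	return 0;
}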
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index bf6e238..98781ac 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -33,6 +33,46 @@
}
EXPORT_SYMBOL(tty_port_init);
+/**
+ * tty_port_link_device - link tty and tty_port
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ *
+ * Provide the tty layer with a link from a tty (specified by @index) to a
+ * tty_port (@port). Use this only if neither tty_port_register_device nor
+ * tty_port_install is used in the driver. If used, this has to be called before
+ * tty_register_driver.
+ */
+void tty_port_link_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index)
+{
+ if (WARN_ON(index >= driver->num))
+ return;
+ driver->ports[index] = port;
+}
+EXPORT_SYMBOL_GPL(tty_port_link_device);
+
+/**
+ * tty_port_register_device - register tty device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent if exists, otherwise NULL
+ *
+ * It is the same as tty_register_device except the provided @port is linked to
+ * a concrete tty specified by @index. Use this or tty_port_install (or both).
+ * Call tty_port_link_device as a last resort.
+ */
+struct device *tty_port_register_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device)
+{
+ tty_port_link_device(port, driver, index);
+ return tty_register_device(driver, index, device);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device);
+
int tty_port_alloc_xmit_buf(struct tty_port *port)
{
/* We may sleep in get_zeroed_page() */
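
A sketch of how a hypothetical serial driver would use the backported tty_port helpers, assuming the tty core allocates drv->ports[] as the upstream version of this API does; my_driver and my_port are illustrative names.

#include <linux/tty.h>
#include <linux/err.h>

static struct tty_driver *my_driver;	/* from alloc_tty_driver() */
static struct tty_port my_port;		/* backs tty index 0 */

static int my_register_port(struct device *parent)
{
	struct device *dev;

	tty_port_init(&my_port);
	/* records my_driver->ports[0] = &my_port, then registers the node */
	dev = tty_port_register_device(&my_port, my_driver, 0, parent);
	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}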
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 2edf34f..b6bd196 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -850,3 +850,9 @@
pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
+
+void *PDE_DATA(const struct inode *inode)
+{
+ return __PDE_DATA(inode);
+}
+EXPORT_SYMBOL(PDE_DATA);
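
A sketch of the intended PDE_DATA() usage in a hypothetical proc file: the accessor recovers the data pointer given to proc_create_data(), which is presumably what the backported bt_procfs code relies on. struct foo_stats and the other names are illustrative.

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct foo_stats {
	unsigned long packets;
};

static int foo_show(struct seq_file *m, void *v)
{
	struct foo_stats *stats = m->private;

	seq_printf(m, "packets: %lu\n", stats->packets);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA(inode) == the data argument given to proc_create_data() */
	return single_open(file, foo_show, PDE_DATA(inode));
}

static const struct file_operations foo_fops = {
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* registered with: proc_create_data("foo_stats", 0444, NULL, &foo_fops, &stats) */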
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5f79bb8..75b9145 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -92,6 +92,11 @@
return PROC_I(inode)->fd;
}
+static inline void *__PDE_DATA(const struct inode *inode)
+{
+ return PDE(inode)->data;
+}
+
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *ino,
struct dentry *dentry);
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index fe5136d..326b193 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -171,6 +171,18 @@
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}
+/**
+ * ether_addr_equal - Compare two Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer to the other six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true if equal
+ */
+static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+ return !compare_ether_addr(addr1, addr2);
+}
+
static inline unsigned long zap_last_2bytes(unsigned long value)
{
#ifdef __BIG_ENDIAN
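
A short sketch of the new ether_addr_equal() helper in a receive-path check; frame_is_for_us() is an illustrative name and the code assumes the skb's mac header is already set.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool frame_is_for_us(const struct sk_buff *skb,
			    const struct net_device *dev)
{
	const struct ethhdr *eh = eth_hdr(skb);

	/* reads more naturally than !compare_ether_addr(...) */
	return ether_addr_equal(eh->h_dest, dev->dev_addr) ||
	       is_broadcast_ether_addr(eh->h_dest);
}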
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 4f01897..0c6e4c9 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -723,6 +723,7 @@
extern int hid_debug;
+extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
@@ -746,6 +747,7 @@
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
void hid_output_report(struct hid_report *report, __u8 *data);
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
int hid_open_report(struct hid_device *device);
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 56d907a..f0af312 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -91,6 +91,8 @@
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
+ * then the field is a length and the frame is 802.3. Else it is Ethernet II. */
/*
* Non DIX types. Won't clash for 1500 types.
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 85c5073..ae513bd 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -174,6 +174,7 @@
struct proc_dir_entry *parent);
extern struct file *proc_ns_fget(int fd);
+extern void *PDE_DATA(const struct inode *);
#else
@@ -229,6 +230,8 @@
return ERR_PTR(-EINVAL);
}
+static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+
#endif /* CONFIG_PROC_FS */
#if !defined(CONFIG_PROC_KCORE)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index e156ce1..1849844 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -142,6 +142,16 @@
int seq_put_decimal_ll(struct seq_file *m, char delimiter,
long long num);
+static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
+{
+#ifdef CONFIG_USER_NS
+ return seq->user_ns;
+#else
+ extern struct user_namespace init_user_ns;
+ return &init_user_ns;
+#endif
+}
+
#define SEQ_START_TOKEN ((void *)1)
/*
* Helpers for iteration over list_head-s in seq_files
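
A sketch of the seq_user_ns() pattern: translating a kernel-internal kuid_t for display in the namespace of the process reading the seq_file, which is what the new bt_procfs show callbacks need. The kuid_t parameter stands in for whatever ownership value a caller tracks.

#include <linux/seq_file.h>
#include <linux/uidgid.h>

static void seq_show_owner(struct seq_file *seq, kuid_t owner)
{
	/* falls back to init_user_ns when CONFIG_USER_NS is off */
	seq_printf(seq, "uid %u\n",
		   from_kuid_munged(seq_user_ns(seq), owner));
}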
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6a0259d..62040f4 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -496,8 +496,16 @@
extern void tty_write_unlock(struct tty_struct *tty);
extern int tty_write_lock(struct tty_struct *tty, int ndelay);
#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
-
extern void tty_port_init(struct tty_port *port);
+extern void tty_port_link_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index);
+extern struct device *tty_port_register_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device);
+extern struct device *tty_port_register_device_attr(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_put(struct tty_port *port);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 6e6dbb7..04419c1 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -313,6 +313,7 @@
* Pointer to the tty data structures
*/
struct tty_struct **ttys;
+ struct tty_port **ports;
struct ktermios **termios;
void *driver_state;
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
new file mode 100644
index 0000000..8e522cbc
--- /dev/null
+++ b/include/linux/uidgid.h
@@ -0,0 +1,200 @@
+#ifndef _LINUX_UIDGID_H
+#define _LINUX_UIDGID_H
+
+/*
+ * A set of types for the internal kernel types representing uids and gids.
+ *
+ * The types defined in this header allow distinguishing which uids and gids in
+ * the kernel are values used by userspace and which uid and gid values are
+ * the internal kernel values. With the addition of user namespaces the values
+ * can be different. Using the type system makes it possible for the compiler
+ * to detect when we overlook these differences.
+ *
+ */
+#include <linux/types.h>
+#include <linux/highuid.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+#ifdef CONFIG_UIDGID_STRICT_TYPE_CHECKS
+
+typedef struct {
+ uid_t val;
+} kuid_t;
+
+
+typedef struct {
+ gid_t val;
+} kgid_t;
+
+#define KUIDT_INIT(value) (kuid_t){ value }
+#define KGIDT_INIT(value) (kgid_t){ value }
+
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return uid.val;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return gid.val;
+}
+
+#else
+
+typedef uid_t kuid_t;
+typedef gid_t kgid_t;
+
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return uid;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return gid;
+}
+
+#define KUIDT_INIT(value) ((kuid_t) value )
+#define KGIDT_INIT(value) ((kgid_t) value )
+
+#endif
+
+#define GLOBAL_ROOT_UID KUIDT_INIT(0)
+#define GLOBAL_ROOT_GID KGIDT_INIT(0)
+
+#define INVALID_UID KUIDT_INIT(-1)
+#define INVALID_GID KGIDT_INIT(-1)
+
+static inline bool uid_eq(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) == __kuid_val(right);
+}
+
+static inline bool gid_eq(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) == __kgid_val(right);
+}
+
+static inline bool uid_gt(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) > __kuid_val(right);
+}
+
+static inline bool gid_gt(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) > __kgid_val(right);
+}
+
+static inline bool uid_gte(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) >= __kuid_val(right);
+}
+
+static inline bool gid_gte(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) >= __kgid_val(right);
+}
+
+static inline bool uid_lt(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) < __kuid_val(right);
+}
+
+static inline bool gid_lt(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) < __kgid_val(right);
+}
+
+static inline bool uid_lte(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) <= __kuid_val(right);
+}
+
+static inline bool gid_lte(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) <= __kgid_val(right);
+}
+
+static inline bool uid_valid(kuid_t uid)
+{
+ return !uid_eq(uid, INVALID_UID);
+}
+
+static inline bool gid_valid(kgid_t gid)
+{
+ return !gid_eq(gid, INVALID_GID);
+}
+
+#ifdef CONFIG_USER_NS
+
+extern kuid_t make_kuid(struct user_namespace *from, uid_t uid);
+extern kgid_t make_kgid(struct user_namespace *from, gid_t gid);
+
+extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
+extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
+extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
+extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);
+
+static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
+{
+ return from_kuid(ns, uid) != (uid_t) -1;
+}
+
+static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+{
+ return from_kgid(ns, gid) != (gid_t) -1;
+}
+
+#else
+
+static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
+{
+ return KUIDT_INIT(uid);
+}
+
+static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid)
+{
+ return KGIDT_INIT(gid);
+}
+
+static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid)
+{
+ return __kuid_val(kuid);
+}
+
+static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid)
+{
+ return __kgid_val(kgid);
+}
+
+static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid)
+{
+ uid_t uid = from_kuid(to, kuid);
+ if (uid == (uid_t)-1)
+ uid = overflowuid;
+ return uid;
+}
+
+static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid)
+{
+ gid_t gid = from_kgid(to, kgid);
+ if (gid == (gid_t)-1)
+ gid = overflowgid;
+ return gid;
+}
+
+static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
+{
+ return true;
+}
+
+static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+{
+ return true;
+}
+
+#endif /* CONFIG_USER_NS */
+
+#endif /* _LINUX_UIDGID_H */
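
A sketch of the conversion discipline the new kuid_t/kgid_t types encode: convert at the user-space boundary with make_kuid()/from_kuid_munged(), validate the mapping, and store only kernel-internal values. set_owner() and report_owner() are illustrative names.

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/uidgid.h>

static int set_owner(uid_t user_value, kuid_t *stored)
{
	kuid_t kuid = make_kuid(current_user_ns(), user_value);

	if (!uid_valid(kuid))	/* no mapping in this namespace */
		return -EINVAL;
	*stored = kuid;		/* keep only the kernel-internal value */
	return 0;
}

static uid_t report_owner(kuid_t stored)
{
	/* convert back when copying to user space; unmapped ids become
	 * overflowuid instead of leaking -1 */
	return from_kuid_munged(current_user_ns(), stored);
}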
diff --git a/include/net/bluetooth/a2mp.h b/include/net/bluetooth/a2mp.h
new file mode 100644
index 0000000..487b54c
--- /dev/null
+++ b/include/net/bluetooth/a2mp.h
@@ -0,0 +1,150 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#ifndef __A2MP_H
+#define __A2MP_H
+
+#include <net/bluetooth/l2cap.h>
+
+#define A2MP_FEAT_EXT 0x8000
+
+enum amp_mgr_state {
+ READ_LOC_AMP_INFO,
+ READ_LOC_AMP_ASSOC,
+ READ_LOC_AMP_ASSOC_FINAL,
+ WRITE_REMOTE_AMP_ASSOC,
+};
+
+struct amp_mgr {
+ struct list_head list;
+ struct l2cap_conn *l2cap_conn;
+ struct l2cap_chan *a2mp_chan;
+ struct l2cap_chan *bredr_chan;
+ struct kref kref;
+ __u8 ident;
+ __u8 handle;
+ unsigned long state;
+ unsigned long flags;
+
+ struct list_head amp_ctrls;
+ struct mutex amp_ctrls_lock;
+};
+
+struct a2mp_cmd {
+ __u8 code;
+ __u8 ident;
+ __le16 len;
+ __u8 data[0];
+} __packed;
+
+/* A2MP command codes */
+#define A2MP_COMMAND_REJ 0x01
+struct a2mp_cmd_rej {
+ __le16 reason;
+ __u8 data[0];
+} __packed;
+
+#define A2MP_DISCOVER_REQ 0x02
+struct a2mp_discov_req {
+ __le16 mtu;
+ __le16 ext_feat;
+} __packed;
+
+struct a2mp_cl {
+ __u8 id;
+ __u8 type;
+ __u8 status;
+} __packed;
+
+#define A2MP_DISCOVER_RSP 0x03
+struct a2mp_discov_rsp {
+ __le16 mtu;
+ __le16 ext_feat;
+ struct a2mp_cl cl[0];
+} __packed;
+
+#define A2MP_CHANGE_NOTIFY 0x04
+#define A2MP_CHANGE_RSP 0x05
+
+#define A2MP_GETINFO_REQ 0x06
+struct a2mp_info_req {
+ __u8 id;
+} __packed;
+
+#define A2MP_GETINFO_RSP 0x07
+struct a2mp_info_rsp {
+ __u8 id;
+ __u8 status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le16 pal_cap;
+ __le16 assoc_size;
+} __packed;
+
+#define A2MP_GETAMPASSOC_REQ 0x08
+struct a2mp_amp_assoc_req {
+ __u8 id;
+} __packed;
+
+#define A2MP_GETAMPASSOC_RSP 0x09
+struct a2mp_amp_assoc_rsp {
+ __u8 id;
+ __u8 status;
+ __u8 amp_assoc[0];
+} __packed;
+
+#define A2MP_CREATEPHYSLINK_REQ 0x0A
+#define A2MP_DISCONNPHYSLINK_REQ 0x0C
+struct a2mp_physlink_req {
+ __u8 local_id;
+ __u8 remote_id;
+ __u8 amp_assoc[0];
+} __packed;
+
+#define A2MP_CREATEPHYSLINK_RSP 0x0B
+#define A2MP_DISCONNPHYSLINK_RSP 0x0D
+struct a2mp_physlink_rsp {
+ __u8 local_id;
+ __u8 remote_id;
+ __u8 status;
+} __packed;
+
+/* A2MP response status */
+#define A2MP_STATUS_SUCCESS 0x00
+#define A2MP_STATUS_INVALID_CTRL_ID 0x01
+#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
+#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
+#define A2MP_STATUS_COLLISION_OCCURED 0x03
+#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
+#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
+#define A2MP_STATUS_SECURITY_VIOLATION 0x06
+
+extern struct list_head amp_mgr_list;
+extern struct mutex amp_mgr_list_lock;
+
+struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+int amp_mgr_put(struct amp_mgr *mgr);
+u8 __next_ident(struct amp_mgr *mgr);
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb);
+struct amp_mgr *amp_mgr_lookup_by_state(u8 state);
+void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data);
+void a2mp_discover_amp(struct l2cap_chan *chan);
+void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
+void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
+void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
+
+#endif /* __A2MP_H */
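
A sketch of how an A2MP discover request would be built with the structures above; the 670-byte MTU literal is an assumption standing in for the L2CAP A2MP default MTU, and send_discover() is an illustrative name.

#include <net/bluetooth/a2mp.h>

static void send_discover(struct amp_mgr *mgr)
{
	struct a2mp_discov_req req;

	req.mtu      = cpu_to_le16(670);	/* assumed A2MP default MTU */
	req.ext_feat = 0;
	a2mp_send(mgr, A2MP_DISCOVER_REQ, __next_ident(mgr),
		  sizeof(req), &req);
}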
diff --git a/include/net/bluetooth/amp.h b/include/net/bluetooth/amp.h
index 15d1817..7ea3db7 100644
--- a/include/net/bluetooth/amp.h
+++ b/include/net/bluetooth/amp.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2010-2012 The Linux Foundation. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
@@ -14,280 +14,41 @@
#ifndef __AMP_H
#define __AMP_H
-/* AMP defaults */
-
-#define A2MP_RSP_TIMEOUT (8000) /* 8 seconds */
-
-/* A2MP Protocol */
-
-/* A2MP command codes */
-#define A2MP_COMMAND_REJ 0x01
-#define A2MP_DISCOVER_REQ 0x02
-#define A2MP_DISCOVER_RSP 0x03
-#define A2MP_CHANGE_NOTIFY 0x04
-#define A2MP_CHANGE_RSP 0x05
-#define A2MP_GETINFO_REQ 0x06
-#define A2MP_GETINFO_RSP 0x07
-#define A2MP_GETAMPASSOC_REQ 0x08
-#define A2MP_GETAMPASSOC_RSP 0x09
-#define A2MP_CREATEPHYSLINK_REQ 0x0A
-#define A2MP_CREATEPHYSLINK_RSP 0x0B
-#define A2MP_DISCONNPHYSLINK_REQ 0x0C
-#define A2MP_DISCONNPHYSLINK_RSP 0x0D
-
-struct a2mp_cmd_hdr {
- __u8 code;
- __u8 ident;
- __le16 len;
-} __packed;
-
-struct a2mp_cmd_rej {
- __le16 reason;
-} __packed;
-
-struct a2mp_discover_req {
- __le16 mtu;
- __le16 ext_feat;
-} __packed;
-
-struct a2mp_cl {
- __u8 id;
- __u8 type;
- __u8 status;
-} __packed;
-
-struct a2mp_discover_rsp {
- __le16 mtu;
- __le16 ext_feat;
- struct a2mp_cl cl[0];
-} __packed;
-
-struct a2mp_getinfo_req {
- __u8 id;
-} __packed;
-
-struct a2mp_getinfo_rsp {
- __u8 id;
- __u8 status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le16 pal_cap;
- __le16 assoc_size;
-} __packed;
-
-struct a2mp_getampassoc_req {
- __u8 id;
-} __packed;
-
-struct a2mp_getampassoc_rsp {
- __u8 id;
- __u8 status;
- __u8 amp_assoc[0];
-} __packed;
-
-struct a2mp_createphyslink_req {
- __u8 local_id;
- __u8 remote_id;
- __u8 amp_assoc[0];
-} __packed;
-
-struct a2mp_createphyslink_rsp {
- __u8 local_id;
- __u8 remote_id;
- __u8 status;
-} __packed;
-
-struct a2mp_disconnphyslink_req {
- __u8 local_id;
- __u8 remote_id;
-} __packed;
-
-struct a2mp_disconnphyslink_rsp {
- __u8 local_id;
- __u8 remote_id;
- __u8 status;
-} __packed;
-
-
-/* L2CAP-AMP module interface */
-int amp_init(void);
-void amp_exit(void);
-
-/* L2CAP-AMP fixed channel interface */
-void amp_conn_ind(struct hci_conn *hcon, struct sk_buff *skb);
-
-/* L2CAP-AMP link interface */
-void amp_create_physical(struct l2cap_conn *conn, struct sock *sk);
-void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk);
-
-/* AMP manager internals */
struct amp_ctrl {
- struct amp_mgr *mgr;
- __u8 id;
- __u8 type;
- __u8 status;
- __u32 total_bw;
- __u32 max_bw;
- __u32 min_latency;
- __u16 pal_cap;
- __u16 max_assoc_size;
+ struct list_head list;
+ struct kref kref;
+ __u8 id;
+ __u16 assoc_len_so_far;
+ __u16 assoc_rem_len;
+ __u16 assoc_len;
+ __u8 *assoc;
};
-struct amp_mgr {
- struct list_head list;
- __u8 discovered;
- __u8 next_ident;
- struct l2cap_conn *l2cap_conn;
- struct socket *a2mp_sock;
- struct list_head ctx_list;
- rwlock_t ctx_list_lock;
- struct amp_ctrl *ctrls; /* @@ TODO s.b. list of controllers */
- struct sk_buff *skb;
- __u8 connected;
-};
+int amp_ctrl_put(struct amp_ctrl *ctrl);
+void amp_ctrl_get(struct amp_ctrl *ctrl);
+struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
+struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
+void amp_ctrl_list_flush(struct amp_mgr *mgr);
-/* AMP Manager signalling contexts */
-#define AMP_GETAMPASSOC 1
-#define AMP_CREATEPHYSLINK 2
-#define AMP_ACCEPTPHYSLINK 3
-#define AMP_CREATELOGLINK 4
-#define AMP_ACCEPTLOGLINK 5
+struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ u8 remote_id, bool out);
-/* Get AMP Assoc sequence */
-#define AMP_GAA_INIT 0
-#define AMP_GAA_RLAA_COMPLETE 1
-struct amp_gaa_state {
- __u8 req_ident;
- __u16 len_so_far;
- __u8 *assoc;
-};
+int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
-/* Create Physical Link sequence */
-#define AMP_CPL_INIT 0
-#define AMP_CPL_DISC_RSP 1
-#define AMP_CPL_GETINFO_RSP 2
-#define AMP_CPL_GAA_RSP 3
-#define AMP_CPL_CPL_STATUS 4
-#define AMP_CPL_WRA_COMPLETE 5
-#define AMP_CPL_CHANNEL_SELECT 6
-#define AMP_CPL_RLA_COMPLETE 7
-#define AMP_CPL_PL_COMPLETE 8
-#define AMP_CPL_PL_CANCEL 9
-struct amp_cpl_state {
- __u8 remote_id;
- __u16 max_len;
- __u8 *remote_assoc;
- __u8 *local_assoc;
- __u16 len_so_far;
- __u16 rem_len;
- __u8 phy_handle;
-};
-
-/* Accept Physical Link sequence */
-#define AMP_APL_INIT 0
-#define AMP_APL_APL_STATUS 1
-#define AMP_APL_WRA_COMPLETE 2
-#define AMP_APL_PL_COMPLETE 3
-struct amp_apl_state {
- __u8 remote_id;
- __u8 req_ident;
- __u8 *remote_assoc;
- __u16 len_so_far;
- __u16 rem_len;
- __u8 phy_handle;
-};
-
-/* Create/Accept Logical Link sequence */
-#define AMP_LOG_INIT 0
-#define AMP_LOG_LL_STATUS 1
-#define AMP_LOG_LL_COMPLETE 2
-struct amp_log_state {
- __u8 remote_id;
-};
-
-/* Possible event types a context may wait for */
-#define AMP_INIT 0x01
-#define AMP_HCI_EVENT 0x02
-#define AMP_HCI_CMD_CMPLT 0x04
-#define AMP_HCI_CMD_STATUS 0x08
-#define AMP_A2MP_RSP 0x10
-#define AMP_KILLED 0x20
-#define AMP_CANCEL 0x40
-struct amp_ctx {
- struct list_head list;
- struct amp_mgr *mgr;
- struct hci_dev *hdev;
- __u8 type;
- __u8 state;
- union {
- struct amp_gaa_state gaa;
- struct amp_cpl_state cpl;
- struct amp_apl_state apl;
- } d;
- __u8 evt_type;
- __u8 evt_code;
- __u16 opcode;
- __u8 id;
- __u8 rsp_ident;
-
- struct sock *sk;
- struct amp_ctx *deferred;
- struct timer_list timer;
-};
-
-/* AMP work */
-struct amp_work_pl_timeout {
- struct work_struct work;
- struct amp_ctrl *ctrl;
-};
-struct amp_work_ctx_timeout {
- struct work_struct work;
- struct amp_ctx *ctx;
-};
-struct amp_work_data_ready {
- struct work_struct work;
- struct sock *sk;
- int bytes;
-};
-struct amp_work_state_change {
- struct work_struct work;
- struct sock *sk;
-};
-struct amp_work_conn_ind {
- struct work_struct work;
- struct hci_conn *hcon;
- struct sk_buff *skb;
-};
-struct amp_work_create_physical {
- struct work_struct work;
- struct l2cap_conn *conn;
- u8 id;
- struct sock *sk;
-};
-struct amp_work_accept_physical {
- struct work_struct work;
- struct l2cap_conn *conn;
- u8 id;
- struct sock *sk;
-};
-struct amp_work_cmd_cmplt {
- struct work_struct work;
- struct hci_dev *hdev;
- u16 opcode;
- struct sk_buff *skb;
-};
-struct amp_work_cmd_status {
- struct work_struct work;
- struct hci_dev *hdev;
- u16 opcode;
- u8 status;
-};
-struct amp_work_event {
- struct work_struct work;
- struct hci_dev *hdev;
- u8 event;
- struct sk_buff *skb;
-};
+void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr);
+void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
+void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
+void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+ struct hci_conn *hcon);
+void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon);
+void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon);
+void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
+void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
+void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
#endif /* __AMP_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e909195..10eb9b3 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010-2013 The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -25,20 +25,22 @@
#ifndef __BLUETOOTH_H
#define __BLUETOOTH_H
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <linux/list.h>
#include <linux/poll.h>
#include <net/sock.h>
+#include <linux/seq_file.h>
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
#define PF_BLUETOOTH AF_BLUETOOTH
#endif
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1 1
+#define BLUETOOTH_VER_1_2 2
+#define BLUETOOTH_VER_2_0 3
+
/* Reserv for core and drivers use */
#define BT_SKB_RESERVE 8
-#define BT_SKB_RESERVE_80211 32
#define BTPROTO_L2CAP 0
#define BTPROTO_HCI 1
@@ -63,68 +65,56 @@
#define BT_SECURITY_LOW 1
#define BT_SECURITY_MEDIUM 2
#define BT_SECURITY_HIGH 3
-#define BT_SECURITY_VERY_HIGH 4
#define BT_DEFER_SETUP 7
+
#define BT_FLUSHABLE 8
+#define BT_FLUSHABLE_OFF 0
+#define BT_FLUSHABLE_ON 1
+
#define BT_POWER 9
struct bt_power {
__u8 force_active;
};
+#define BT_POWER_FORCE_ACTIVE_OFF 0
+#define BT_POWER_FORCE_ACTIVE_ON 1
-#define BT_AMP_POLICY 10
+#define BT_CHANNEL_POLICY 10
-/* Require BR/EDR (default policy)
- * AMP controllers cannot be used
- * Channel move requests from the remote device are denied
- * If the L2CAP channel is currently using AMP, move the channel to BR/EDR
+/* BR/EDR only (default policy)
+ * AMP controllers cannot be used.
+ * Channel move requests from the remote device are denied.
+ * If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
*/
-#define BT_AMP_POLICY_REQUIRE_BR_EDR 0
+#define BT_CHANNEL_POLICY_BREDR_ONLY 0
-/* Prefer BR/EDR
- * Allow use of AMP controllers
- * If the L2CAP channel is currently on AMP, move it to BR/EDR
- * Channel move requests from the remote device are allowed
+/* BR/EDR Preferred
+ * Allow use of AMP controllers.
+ * If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ * Channel move requests from the remote device are allowed.
*/
-#define BT_AMP_POLICY_PREFER_BR_EDR 1
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1
-/* Prefer AMP
+/* AMP Preferred
* Allow use of AMP controllers
* If the L2CAP channel is currently on BR/EDR and AMP controller
- * resources are available, initiate a channel move to AMP
- * Channel move requests from the remote device are allowed
+ * resources are available, initiate a channel move to AMP.
+ * Channel move requests from the remote device are allowed.
* If the L2CAP socket has not been connected yet, try to create
* and configure the channel directly on an AMP controller rather
- * than BR/EDR
+ * than BR/EDR.
*/
-#define BT_AMP_POLICY_PREFER_AMP 2
+#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
-#define BT_LE_PARAMS 100
+__printf(1, 2)
+int bt_info(const char *fmt, ...);
+__printf(1, 2)
+int bt_err(const char *fmt, ...);
-#define BT_LE_SCAN_WINDOW_MIN 0x0004
-#define BT_LE_SCAN_WINDOW_MAX 0x4000
-#define BT_LE_SCAN_WINDOW_DEF 0x0004
-
-#define BT_LE_SCAN_INTERVAL_MIN 0x0004
-#define BT_LE_SCAN_INTERVAL_MAX 0x4000
-#define BT_LE_SCAN_INTERVAL_DEF 0x0008
-
-#define BT_LE_CONN_INTERVAL_MIN 0x0006
-#define BT_LE_CONN_INTERVAL_MAX 0x0C80
-#define BT_LE_CONN_INTERVAL_MIN_DEF 0x0008
-#define BT_LE_CONN_INTERVAL_MAX_DEF 0x0100
-
-#define BT_LE_LATENCY_MAX 0x01F4
-#define BT_LE_LATENCY_DEF 0x0000
-
-#define BT_LE_SUP_TO_MIN 0x000A
-#define BT_LE_SUP_TO_MAX 0x0C80
-#define BT_LE_SUP_TO_DEFAULT 0X03E8
-
-#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
-#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
-#define BT_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
+#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
/* Connection and socket states */
enum {
@@ -139,63 +129,109 @@
BT_CLOSED
};
+/* If unused, the compiler will remove it */
+static inline const char *state_to_string(int state)
+{
+ switch (state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
+
+ return "invalid state";
+}
+
/* BD Address */
typedef struct {
__u8 b[6];
} __packed bdaddr_t;
+/* BD Address type */
+#define BDADDR_BREDR 0x00
+#define BDADDR_LE_PUBLIC 0x01
+#define BDADDR_LE_RANDOM 0x02
+
+static inline bool bdaddr_type_is_valid(__u8 type)
+{
+ switch (type) {
+ case BDADDR_BREDR:
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool bdaddr_type_is_le(__u8 type)
+{
+ switch (type) {
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
/* Copy, swap, convert BD Address */
-static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
+static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
{
return memcmp(ba1, ba2, sizeof(bdaddr_t));
}
-static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
+static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
{
memcpy(dst, src, sizeof(bdaddr_t));
}
void baswap(bdaddr_t *dst, bdaddr_t *src);
-char *batostr(bdaddr_t *ba);
-bdaddr_t *strtoba(char *str);
/* Common socket structures and functions */
#define bt_sk(__sk) ((struct bt_sock *) __sk)
-struct bt_le_params {
- __u8 prohibit_remote_chg;
- __u8 filter_policy;
- __u16 scan_interval;
- __u16 scan_window;
- __u16 interval_min;
- __u16 interval_max;
- __u16 latency;
- __u16 supervision_timeout;
- __u16 min_ce_len;
- __u16 max_ce_len;
- __u16 conn_timeout;
-};
-
struct bt_sock {
struct sock sk;
bdaddr_t src;
bdaddr_t dst;
struct list_head accept_q;
struct sock *parent;
- u32 defer_setup;
- struct bt_le_params le_params;
+ unsigned long flags;
+};
+
+enum {
+ BT_SK_DEFER_SETUP,
+ BT_SK_SUSPEND,
};
struct bt_sock_list {
struct hlist_head head;
rwlock_t lock;
+#ifdef CONFIG_PROC_FS
+ int (* custom_seq_show)(struct seq_file *, void *);
+#endif
};
int bt_sock_register(int proto, const struct net_proto_family *ops);
-int bt_sock_unregister(int proto);
+void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -211,25 +247,35 @@
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
-struct bt_l2cap_control {
- __u8 frame_type;
- __u8 final;
- __u8 sar;
- __u8 super;
- __u16 reqseq;
- __u16 txseq;
- __u8 poll;
- __u8 fcs;
+struct l2cap_ctrl {
+ unsigned int sframe:1,
+ poll:1,
+ final:1,
+ fcs:1,
+ sar:2,
+ super:2;
+ __u16 reqseq;
+ __u16 txseq;
+ __u8 retries;
+};
+
+struct hci_dev;
+
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
+
+struct hci_req_ctrl {
+ bool start;
+ u8 event;
+ hci_req_complete_t complete;
};
struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
__u16 expect;
- __u8 retries;
__u8 force_active;
- unsigned short channel;
- struct bt_l2cap_control control;
+ struct l2cap_ctrl control;
+ struct hci_req_ctrl req;
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -250,13 +296,11 @@
{
struct sk_buff *skb;
- release_sock(sk);
skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
- lock_sock(sk);
if (!skb && *err)
return NULL;
@@ -277,7 +321,7 @@
return NULL;
}
-int bt_err(__u16 code);
+int bt_to_errno(__u16 code);
extern int hci_sock_init(void);
extern void hci_sock_cleanup(void);
@@ -285,6 +329,11 @@
extern int bt_sysfs_init(void);
extern void bt_sysfs_cleanup(void);
+extern int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list* sk_list,
+ int (* seq_show)(struct seq_file *, void *));
+extern void bt_procfs_cleanup(struct net *net, const char *name);
+
extern struct dentry *bt_debugfs;
int l2cap_init(void);
@@ -293,4 +342,6 @@
int sco_init(void);
void sco_exit(void);
+void bt_sock_reclassify_lock(struct sock *sk, int proto);
+
#endif /* __BLUETOOTH_H */
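
A sketch of state_to_string() together with the reworked BT_DBG(), which now routes through pr_debug() and dynamic debug; chan_set_state() is an illustrative name.

#include <net/bluetooth/bluetooth.h>

static void chan_set_state(struct sock *sk, int new_state)
{
	BT_DBG("sk %p %s -> %s", sk,
	       state_to_string(sk->sk_state),
	       state_to_string(new_state));
	sk->sk_state = new_state;
}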
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d88448f..db43501 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010-2012 The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -25,11 +25,16 @@
#ifndef __HCI_H
#define __HCI_H
-#define HCI_MAX_ACL_SIZE 1500
+#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
+#define HCI_LINK_KEY_SIZE 16
+#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
+
+#define HCI_MAX_AMP_ASSOC_SIZE 672
+
/* HCI dev events */
#define HCI_DEV_REG 1
#define HCI_DEV_UNREG 2
@@ -37,7 +42,6 @@
#define HCI_DEV_DOWN 4
#define HCI_DEV_SUSPEND 5
#define HCI_DEV_RESUME 6
-#define HCI_DEV_WRITE 7
/* HCI notify events */
#define HCI_NOTIFY_CONN_ADD 1
@@ -52,15 +56,26 @@
#define HCI_RS232 4
#define HCI_PCI 5
#define HCI_SDIO 6
-#define HCI_SMD 7
/* HCI controller types */
#define HCI_BREDR 0x00
#define HCI_AMP 0x01
+/* First BR/EDR Controller shall have ID = 0 */
+#define HCI_BREDR_ID 0
+
+/* AMP controller status */
+#define AMP_CTRL_POWERED_DOWN 0x00
+#define AMP_CTRL_BLUETOOTH_ONLY 0x01
+#define AMP_CTRL_NO_CAPACITY 0x02
+#define AMP_CTRL_LOW_CAPACITY 0x03
+#define AMP_CTRL_MEDIUM_CAPACITY 0x04
+#define AMP_CTRL_HIGH_CAPACITY 0x05
+#define AMP_CTRL_FULL_CAPACITY 0x06
+
/* HCI device quirks */
enum {
- HCI_QUIRK_NO_RESET,
+ HCI_QUIRK_RESET_ON_CLOSE,
HCI_QUIRK_RAW_DEVICE,
HCI_QUIRK_FIXUP_BUFFER_SIZE
};
@@ -79,17 +94,42 @@
HCI_RAW,
+ HCI_RESET,
+};
+
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
HCI_SETUP,
HCI_AUTO_OFF,
+ HCI_RFKILLED,
HCI_MGMT,
HCI_PAIRABLE,
HCI_SERVICE_CACHE,
HCI_LINK_KEYS,
HCI_DEBUG_KEYS,
+ HCI_UNREGISTER,
- HCI_RESET,
+ HCI_LE_SCAN,
+ HCI_SSP_ENABLED,
+ HCI_HS_ENABLED,
+ HCI_LE_ENABLED,
+ HCI_LE_PERIPHERAL,
+ HCI_CONNECTABLE,
+ HCI_DISCOVERABLE,
+ HCI_LINK_SECURITY,
+ HCI_PERIODIC_INQ,
+ HCI_FAST_CONNECTABLE,
};
+/* A mask for the flags that are supposed to remain when a reset happens
+ * or the HCI device is closed.
+ */
+#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
+ BIT(HCI_FAST_CONNECTABLE))
+
/* HCI ioctl defines */
#define HCIDEVUP _IOW('H', 201, int)
#define HCIDEVDOWN _IOW('H', 202, int)
@@ -101,7 +141,6 @@
#define HCIGETCONNLIST _IOR('H', 212, int)
#define HCIGETCONNINFO _IOR('H', 213, int)
#define HCIGETAUTHINFO _IOR('H', 215, int)
-#define HCISETAUTHINFO _IOR('H', 216, int)
#define HCISETRAW _IOW('H', 220, int)
#define HCISETSCAN _IOW('H', 221, int)
@@ -119,13 +158,12 @@
#define HCIINQUIRY _IOR('H', 240, int)
/* HCI timeouts */
-#define HCI_DISCONN_AUTH_FAILED_TIMEOUT (10) /* 10 ms */
-#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
-#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
-#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
-#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
-#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
-#define HCI_CMD_TIMEOUT (5000) /* 5 seconds */
+#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
+#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
+#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -161,25 +199,11 @@
#define ESCO_2EV5 0x0100
#define ESCO_3EV5 0x0200
-#define ESCO_WBS (ESCO_EV3 | (EDR_ESCO_MASK ^ ESCO_2EV3))
-
#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
#define ALL_ESCO_MASK (SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
EDR_ESCO_MASK)
-/* Air Coding Format */
-#define ACF_CVSD 0x0000;
-#define ACF_ULAW 0x0001;
-#define ACF_ALAW 0x0002;
-#define ACF_TRANS 0x0003;
-
-/* Retransmission Effort */
-#define RE_NO_RETRANS 0x00;
-#define RE_POWER_CONSUMP 0x01;
-#define RE_LINK_QUALITY 0x02;
-#define RE_DONT_CARE 0xFF;
-
/* ACL flags */
#define ACL_START_NO_FLUSH 0x00
#define ACL_CONT 0x01
@@ -188,14 +212,13 @@
#define ACL_ACTIVE_BCAST 0x04
#define ACL_PICO_BCAST 0x08
-#define ACL_PB_MASK (ACL_CONT | ACL_START)
-
/* Baseband links */
#define SCO_LINK 0x00
#define ACL_LINK 0x01
#define ESCO_LINK 0x02
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
+#define AMP_LINK 0x81
/* LMP features */
#define LMP_3SLOT 0x01
@@ -225,6 +248,7 @@
#define LMP_EV4 0x01
#define LMP_EV5 0x02
+#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40
#define LMP_SNIFF_SUBR 0x02
@@ -234,11 +258,18 @@
#define LMP_EDR_3S_ESCO 0x80
#define LMP_EXT_INQ 0x01
+#define LMP_SIMUL_LE_BR 0x02
#define LMP_SIMPLE_PAIR 0x08
#define LMP_NO_FLUSH 0x40
#define LMP_LSTO 0x01
#define LMP_INQ_TX_PWR 0x02
+#define LMP_EXTFEATURES 0x80
+
+/* Extended LMP features */
+#define LMP_HOST_SSP 0x01
+#define LMP_HOST_LE 0x02
+#define LMP_HOST_LE_BREDR 0x04
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -269,9 +300,59 @@
#define HCI_AT_GENERAL_BONDING 0x04
#define HCI_AT_GENERAL_BONDING_MITM 0x05
+/* Link Key types */
+#define HCI_LK_COMBINATION 0x00
+#define HCI_LK_LOCAL_UNIT 0x01
+#define HCI_LK_REMOTE_UNIT 0x02
+#define HCI_LK_DEBUG_COMBINATION 0x03
+#define HCI_LK_UNAUTH_COMBINATION 0x04
+#define HCI_LK_AUTH_COMBINATION 0x05
+#define HCI_LK_CHANGED_COMBINATION 0x06
+/* The spec doesn't define types for SMP keys, the _MASTER suffix is implied */
+#define HCI_SMP_STK 0x80
+#define HCI_SMP_STK_SLAVE 0x81
+#define HCI_SMP_LTK 0x82
+#define HCI_SMP_LTK_SLAVE 0x83
+
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_AUTH_FAILURE 0x05
+#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_REMOTE_USER_TERM 0x13
+#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
+#define HCI_ERROR_REMOTE_POWER_OFF 0x15
+#define HCI_ERROR_LOCAL_HOST_TERM 0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+
/* Flow control modes */
-#define HCI_PACKET_BASED_FLOW_CTL_MODE 0x00
-#define HCI_BLOCK_BASED_FLOW_CTL_MODE 0x01
+#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
+#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
+
+/* The core spec defines 127 as the "not available" value */
+#define HCI_TX_POWER_INVALID 127
+
+/* Extended Inquiry Response field types */
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
+#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */
+#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+
+/* Low Energy Advertising Flags */
+#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
+#define LE_AD_GENERAL 0x02 /* General Discoverable */
+#define LE_AD_NO_BREDR 0x04 /* BR/EDR not supported */
+#define LE_AD_SIM_LE_BREDR_CTRL 0x08 /* Simultaneous LE & BR/EDR Controller */
+#define LE_AD_SIM_LE_BREDR_HOST 0x10 /* Simultaneous LE & BR/EDR Host */
/* ----- HCI Commands ---- */
#define HCI_OP_NOP 0x0000
@@ -285,6 +366,8 @@
#define HCI_OP_INQUIRY_CANCEL 0x0402
+#define HCI_OP_PERIODIC_INQ 0x0403
+
#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
#define HCI_OP_CREATE_CONN 0x0405
@@ -329,12 +412,7 @@
#define HCI_OP_LINK_KEY_REPLY 0x040b
struct hci_cp_link_key_reply {
bdaddr_t bdaddr;
- __u8 link_key[16];
-} __packed;
-
-struct hci_rp_link_key_reply {
- __u8 status;
- bdaddr_t bdaddr;
+ __u8 link_key[HCI_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
@@ -413,11 +491,6 @@
__le16 handle;
} __packed;
-#define HCI_OP_READ_CLOCK_OFFSET 0x041f
-struct hci_cp_read_clock_offset {
- __le16 handle;
-} __packed;
-
#define HCI_OP_SETUP_SYNC_CONN 0x0428
struct hci_cp_setup_sync_conn {
__le16 handle;
@@ -465,6 +538,14 @@
#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
+#define HCI_OP_USER_PASSKEY_REPLY 0x042e
+struct hci_cp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f
+
#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430
struct hci_cp_remote_oob_data_reply {
bdaddr_t bdaddr;
@@ -483,43 +564,43 @@
__u8 reason;
} __packed;
-#define HCI_OP_CREATE_PHYS_LINK 0x0435
-struct hci_cp_create_phys_link {
+#define HCI_OP_CREATE_PHY_LINK 0x0435
+struct hci_cp_create_phy_link {
__u8 phy_handle;
__u8 key_len;
- __u8 type;
- __u8 data[32];
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
} __packed;
-#define HCI_OP_ACCEPT_PHYS_LINK 0x0436
-struct hci_cp_accept_phys_link {
+#define HCI_OP_ACCEPT_PHY_LINK 0x0436
+struct hci_cp_accept_phy_link {
__u8 phy_handle;
__u8 key_len;
- __u8 type;
- __u8 data[32];
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
} __packed;
-#define HCI_OP_DISCONN_PHYS_LINK 0x0437
-struct hci_cp_disconn_phys_link {
+#define HCI_OP_DISCONN_PHY_LINK 0x0437
+struct hci_cp_disconn_phy_link {
__u8 phy_handle;
__u8 reason;
} __packed;
-struct hci_ext_fs {
+struct ext_flow_spec {
__u8 id;
- __u8 type;
- __le16 max_sdu;
- __le32 sdu_arr_time;
- __le32 acc_latency;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
__le32 flush_to;
} __packed;
#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
-struct hci_cp_create_logical_link {
- __u8 phy_handle;
- struct hci_ext_fs tx_fs;
- struct hci_ext_fs rx_fs;
+struct hci_cp_create_accept_logical_link {
+ __u8 phy_handle;
+ struct ext_flow_spec tx_flow_spec;
+ struct ext_flow_spec rx_flow_spec;
} __packed;
#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
@@ -539,13 +620,6 @@
__u8 flow_spec_id;
} __packed;
-#define HCI_OP_FLOW_SPEC_MODIFY 0x043c
-struct hci_cp_flow_spec_modify {
- __le16 log_handle;
- struct hci_ext_fs tx_fs;
- struct hci_ext_fs rx_fs;
-} __packed;
-
#define HCI_OP_SNIFF_MODE 0x0803
struct hci_cp_sniff_mode {
__le16 handle;
@@ -706,12 +780,6 @@
__le16 voice_setting;
} __packed;
-#define HCI_OP_WRITE_AUTOMATIC_FLUSH_TIMEOUT 0x0c28
-struct hci_cp_write_automatic_flush_timeout {
- __le16 handle;
- __le16 timeout;
-} __packed;
-
#define HCI_OP_HOST_BUFFER_SIZE 0x0c33
struct hci_cp_host_buffer_size {
__le16 acl_mtu;
@@ -720,20 +788,14 @@
__le16 sco_max_pkt;
} __packed;
-#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a
-struct hci_cp_write_current_iac_lap {
- __u8 num_current_iac;
- __u8 lap[6];
-} __packed;
-
#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
#define HCI_MAX_EIR_LENGTH 240
#define HCI_OP_WRITE_EIR 0x0c52
struct hci_cp_write_eir {
- uint8_t fec;
- uint8_t data[HCI_MAX_EIR_LENGTH];
+ __u8 fec;
+ __u8 data[HCI_MAX_EIR_LENGTH];
} __packed;
#define HCI_OP_READ_SSP_MODE 0x0c55
@@ -755,38 +817,9 @@
} __packed;
#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
-
-#define HCI_OP_READ_LL_TIMEOUT 0x0c61
-struct hci_rp_read_ll_timeout {
+struct hci_rp_read_inq_rsp_tx_power {
__u8 status;
- __le16 timeout;
-} __packed;
-
-#define HCI_OP_WRITE_LL_TIMEOUT 0x0c62
-struct hci_cp_write_ll_timeout {
- __le16 timeout;
-} __packed;
-
-#define HCI_OP_SET_EVENT_MASK_PAGE2 0x0c63
-struct hci_cp_set_event_mask_page2 {
- __u8 mask[8];
-} __packed;
-
-#define HCI_OP_READ_LOCATION_DATA 0x0c64
-struct hci_rp_read_location_data {
- __u8 status;
- __u8 loc_dom_aware;
- __u8 loc_dom;
- __u8 loc_dom_opts;
- __u8 loc_opts;
-} __packed;
-
-#define HCI_OP_WRITE_LOCATION_DATA 0x0c65
-struct hci_cp_write_location_data {
- __u8 loc_dom_aware;
- __u8 loc_dom;
- __u8 loc_dom_opts;
- __u8 loc_opts;
+ __s8 tx_power;
} __packed;
#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
@@ -795,31 +828,10 @@
__u8 mode;
} __packed;
-#define HCI_OP_WRITE_FLOW_CONTROL_MODE 0x0c67
-struct hci_cp_write_flow_control_mode {
- __u8 mode;
-} __packed;
-
-#define HCI_OP_READ_BE_FLUSH_TIMEOUT 0x0c69
-struct hci_cp_read_be_flush_timeout {
- __le16 log_handle;
-} __packed;
-
-struct hci_rp_read_be_flush_timeout {
- __u8 status;
- __le32 timeout;
-} __packed;
-
-#define HCI_OP_WRITE_BE_FLUSH_TIMEOUT 0x0c6a
-struct hci_cp_write_be_flush_timeout {
- __le16 log_handle;
- __le32 timeout;
-} __packed;
-
-#define HCI_OP_SHORT_RANGE_MODE 0x0c6b
-struct hci_cp_short_range_mode {
- __u8 phy_handle;
- __u8 mode;
+#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
+struct hci_cp_write_le_host_supported {
+ __u8 le;
+ __u8 simul;
} __packed;
#define HCI_OP_READ_LOCAL_VERSION 0x1001
@@ -845,6 +857,9 @@
} __packed;
#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
+struct hci_cp_read_local_ext_features {
+ __u8 page;
+} __packed;
struct hci_rp_read_local_ext_features {
__u8 status;
__u8 page;
@@ -871,21 +886,33 @@
struct hci_rp_read_data_block_size {
__u8 status;
__le16 max_acl_len;
- __le16 data_block_len;
+ __le16 block_len;
__le16 num_blocks;
} __packed;
-#define HCI_OP_READ_RSSI 0x1405
-struct hci_cp_read_rssi {
- __le16 handle;
+#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
+struct hci_rp_read_page_scan_activity {
+ __u8 status;
+ __le16 interval;
+ __le16 window;
} __packed;
-struct hci_rp_read_rssi {
- __u8 status;
- __le16 handle;
- __s8 rssi;
+#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
+struct hci_cp_write_page_scan_activity {
+ __le16 interval;
+ __le16 window;
} __packed;
+#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
+struct hci_rp_read_page_scan_type {
+ __u8 status;
+ __u8 type;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47
+ #define PAGE_SCAN_TYPE_STANDARD 0x00
+ #define PAGE_SCAN_TYPE_INTERLACED 0x01
+
#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
struct hci_rp_read_local_amp_info {
__u8 status;
@@ -907,12 +934,11 @@
__le16 len_so_far;
__le16 max_len;
} __packed;
-
struct hci_rp_read_local_amp_assoc {
__u8 status;
__u8 phy_handle;
__le16 rem_len;
- __u8 frag[248];
+ __u8 frag[0];
} __packed;
#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
@@ -920,9 +946,8 @@
__u8 phy_handle;
__le16 len_so_far;
__le16 rem_len;
- __u8 frag[248];
+ __u8 frag[0];
} __packed;
-
struct hci_rp_write_remote_amp_assoc {
__u8 status;
__u8 phy_handle;
@@ -940,19 +965,49 @@
__u8 le_max_pkt;
} __packed;
-#define HCI_OP_LE_SET_SCAN_PARAMETERS 0x200b
-struct hci_cp_le_set_scan_parameters {
- __u8 type;
- __le16 interval;
- __le16 window;
- __u8 own_bdaddr_type;
- __u8 filter;
+#define HCI_OP_LE_READ_LOCAL_FEATURES 0x2003
+struct hci_rp_le_read_local_features {
+ __u8 status;
+ __u8 features[8];
} __packed;
+#define HCI_OP_LE_READ_ADV_TX_POWER 0x2007
+struct hci_rp_le_read_adv_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_MAX_AD_LENGTH 31
+
+#define HCI_OP_LE_SET_ADV_DATA 0x2008
+struct hci_cp_le_set_adv_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
+
+#define LE_SCAN_PASSIVE 0x00
+#define LE_SCAN_ACTIVE 0x01
+
+#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
+struct hci_cp_le_set_scan_param {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+ __u8 own_address_type;
+ __u8 filter_policy;
+} __packed;
+
+#define LE_SCAN_DISABLE 0x00
+#define LE_SCAN_ENABLE 0x01
+#define LE_SCAN_FILTER_DUP_DISABLE 0x00
+#define LE_SCAN_FILTER_DUP_ENABLE 0x01
+
#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
struct hci_cp_le_set_scan_enable {
- __u8 enable;
- __u8 filter_dup;
+ __u8 enable;
+ __u8 filter_dup;
} __packed;
#define HCI_OP_LE_CREATE_CONN 0x200d
@@ -973,24 +1028,10 @@
#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
-#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200F
+#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
struct hci_rp_le_read_white_list_size {
- __u8 status;
- __u8 size;
-} __packed;
-
-#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010
-
-#define HCI_OP_LE_ADD_DEV_WHITE_LIST 0x2011
-struct hci_cp_le_add_dev_white_list {
- __u8 addr_type;
- bdaddr_t addr;
-} __packed;
-
-#define HCI_OP_LE_REMOVE_DEV_WHITE_LIST 0x2012
-struct hci_cp_le_remove_dev_white_list {
- __u8 addr_type;
- bdaddr_t addr;
+ __u8 status;
+ __u8 size;
} __packed;
#define HCI_OP_LE_CONN_UPDATE 0x2013
@@ -1004,16 +1045,6 @@
__le16 max_ce_len;
} __packed;
-#define HCI_OP_LE_ENCRYPT 0x2017
-struct hci_cp_le_encrypt {
- __u8 key[16];
- __u8 data[16];
-} __packed;
-struct hci_cp_le_encrypt_reply {
- __u8 status;
- __u8 encrypted[16];
-} __packed;
-
#define HCI_OP_LE_START_ENC 0x2019
struct hci_cp_le_start_enc {
__le16 handle;
@@ -1041,6 +1072,12 @@
__le16 handle;
} __packed;
+#define HCI_OP_LE_READ_SUPPORTED_STATES 0x201c
+struct hci_rp_le_read_supported_states {
+ __u8 status;
+ __u8 le_states[8];
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1154,9 +1191,14 @@
} __packed;
#define HCI_EV_NUM_COMP_PKTS 0x13
+struct hci_comp_pkts_info {
+ __le16 handle;
+ __le16 count;
+} __packed;
+
struct hci_ev_num_comp_pkts {
__u8 num_hndl;
- /* variable length part */
+ struct hci_comp_pkts_info handles[0];
} __packed;
#define HCI_EV_MODE_CHANGE 0x14
@@ -1180,7 +1222,7 @@
#define HCI_EV_LINK_KEY_NOTIFY 0x18
struct hci_ev_link_key_notify {
bdaddr_t bdaddr;
- __u8 link_key[16];
+ __u8 link_key[HCI_LINK_KEY_SIZE];
__u8 key_type;
} __packed;
@@ -1276,6 +1318,12 @@
__u8 data[240];
} __packed;
+#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
+struct hci_ev_key_refresh_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
#define HCI_EV_IO_CAPA_REQUEST 0x31
struct hci_ev_io_capa_request {
bdaddr_t bdaddr;
@@ -1296,8 +1344,8 @@
} __packed;
#define HCI_EV_USER_PASSKEY_REQUEST 0x34
-struct hci_ev_user_passkey_request {
- bdaddr_t bdaddr;
+struct hci_ev_user_passkey_req {
+ bdaddr_t bdaddr;
} __packed;
#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
@@ -1311,12 +1359,24 @@
bdaddr_t bdaddr;
} __packed;
-#define HCI_EV_USER_PASSKEY_NOTIFICATION 0x3b
-struct hci_ev_user_passkey_notification {
+#define HCI_EV_USER_PASSKEY_NOTIFY 0x3b
+struct hci_ev_user_passkey_notify {
bdaddr_t bdaddr;
__le32 passkey;
} __packed;
+#define HCI_KEYPRESS_STARTED 0
+#define HCI_KEYPRESS_ENTERED 1
+#define HCI_KEYPRESS_ERASED 2
+#define HCI_KEYPRESS_CLEARED 3
+#define HCI_KEYPRESS_COMPLETED 4
+
+#define HCI_EV_KEYPRESS_NOTIFY 0x3c
+struct hci_ev_keypress_notify {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+
#define HCI_EV_REMOTE_HOST_FEATURES 0x3d
struct hci_ev_remote_host_features {
bdaddr_t bdaddr;
@@ -1328,7 +1388,55 @@
__u8 subevent;
} __packed;
+#define HCI_EV_PHY_LINK_COMPLETE 0x40
+struct hci_ev_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_CHANNEL_SELECTED 0x41
+struct hci_ev_channel_selected {
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
+struct hci_ev_disconn_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
+struct hci_ev_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
+struct hci_ev_disconn_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_NUM_COMP_BLOCKS 0x48
+struct hci_comp_blocks_info {
+ __le16 handle;
+ __le16 pkts;
+ __le16 blocks;
+} __packed;
+
+struct hci_ev_num_comp_blocks {
+ __le16 num_blocks;
+ __u8 num_hndl;
+ struct hci_comp_blocks_info handles[0];
+} __packed;
+
/* Low energy meta events */
+#define LE_CONN_ROLE_MASTER 0x00
+
#define HCI_EV_LE_CONN_COMPLETE 0x01
struct hci_ev_le_conn_complete {
__u8 status;
@@ -1342,6 +1450,14 @@
__u8 clk_accurancy;
} __packed;
+#define HCI_EV_LE_LTK_REQ 0x05
+struct hci_ev_le_ltk_req {
+ __le16 handle;
+ __u8 random[8];
+ __le16 ediv;
+} __packed;
+
+/* Advertising report event types */
#define ADV_IND 0x00
#define ADV_DIRECT_IND 0x01
#define ADV_SCAN_IND 0x02
@@ -1360,81 +1476,6 @@
__u8 data[0];
} __packed;
-#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
-struct hci_ev_le_conn_update_complete {
- __u8 status;
- __le16 handle;
- __le16 interval;
- __le16 latency;
- __le16 supervision_timeout;
-} __packed;
-
-#define HCI_EV_LE_LTK_REQ 0x05
-struct hci_ev_le_ltk_req {
- __le16 handle;
- __u8 random[8];
- __le16 ediv;
-} __packed;
-
-#define HCI_EV_PHYS_LINK_COMPLETE 0x40
-struct hci_ev_phys_link_complete {
- __u8 status;
- __u8 phy_handle;
-} __packed;
-
-#define HCI_EV_CHANNEL_SELECTED 0x41
-struct hci_ev_channel_selected {
- __u8 phy_handle;
-} __packed;
-
-#define HCI_EV_DISCONN_PHYS_LINK_COMPLETE 0x42
-struct hci_ev_disconn_phys_link_complete {
- __u8 status;
- __u8 phy_handle;
- __u8 reason;
-} __packed;
-
-#define HCI_EV_LOG_LINK_COMPLETE 0x45
-struct hci_ev_log_link_complete {
- __u8 status;
- __le16 log_handle;
- __u8 phy_handle;
- __u8 flow_spec_id;
-} __packed;
-
-#define HCI_EV_DISCONN_LOG_LINK_COMPLETE 0x46
-struct hci_ev_disconn_log_link_complete {
- __u8 status;
- __le16 log_handle;
- __u8 reason;
-} __packed;
-
-#define HCI_EV_FLOW_SPEC_MODIFY_COMPLETE 0x47
-struct hci_ev_flow_spec_modify_complete {
- __u8 status;
- __le16 log_handle;
-} __packed;
-
-#define HCI_EV_NUM_COMP_BLOCKS 0x48
-struct hci_ev_num_comp_blocks {
- __le16 total_num_blocks;
- __u8 num_hndl;
- /* variable length part */
-} __packed;
-
-#define HCI_EV_SHORT_RANGE_MODE_COMPLETE 0x4c
-struct hci_ev_short_range_mode_complete {
- __u8 status;
- __u8 phy_handle;
- __u8 mode;
-} __packed;
-
-#define HCI_EV_AMP_STATUS_CHANGE 0x4d
-struct hci_ev_amp_status_change {
- __u8 status;
- __u8 amp_status;
-} __packed;
-
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
@@ -1482,8 +1523,6 @@
__u8 dlen;
} __packed;
-#ifdef __KERNEL__
-#include <linux/skbuff.h>
static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
return (struct hci_event_hdr *) skb->data;
@@ -1498,15 +1537,14 @@
{
return (struct hci_sco_hdr *) skb->data;
}
-#endif
/* Command opcode pack/unpack */
-#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10))
+#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
#define hci_opcode_ocf(op) (op & 0x03ff)
/* ACL handle and flags pack/unpack */
-#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12))
+#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
#define hci_handle(h) (h & 0x0fff)
#define hci_flags(h) (h >> 12)
@@ -1529,7 +1567,8 @@
#define HCI_DEV_NONE 0xffff
#define HCI_CHANNEL_RAW 0
-#define HCI_CHANNEL_CONTROL 1
+#define HCI_CHANNEL_MONITOR 2
+#define HCI_CHANNEL_CONTROL 3
struct hci_filter {
unsigned long type_mask;
@@ -1595,8 +1634,6 @@
__u32 mtu;
__u32 cnt;
__u32 pkts;
- __u8 pending_sec_level;
- __u8 ssp_mode;
};
struct hci_dev_req {
@@ -1635,4 +1672,6 @@
};
#define IREQ_CACHE_FLUSH 0x0001
+extern bool enable_hs;
+
#endif /* __HCI_H */
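For reference, the opcode pack/unpack macros touched in the hunk above can be exercised outside the kernel. The following minimal sketch copies the macro bodies into a stand-alone C program (the program itself, including main() and the printed layout, is illustrative and not part of the patch) and confirms that OGF 0x08 / OCF 0x000f packs to the 0x200f opcode used by the LE commands in this header.

#include <stdio.h>
#include <stdint.h>

/* Macro bodies copied from the hunk above, with __u16 replaced by uint16_t. */
#define hci_opcode_pack(ogf, ocf) ((uint16_t) ((ocf & 0x03ff) | (ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
#define hci_opcode_ocf(op) (op & 0x03ff)

int main(void)
{
	uint16_t op = hci_opcode_pack(0x08, 0x000f); /* OGF 0x08 (LE ctl), OCF 0x000f */

	/* Prints: opcode=0x200f ogf=0x08 ocf=0x000f */
	printf("opcode=0x%04x ogf=0x%02x ocf=0x%04x\n",
	       op, hci_opcode_ogf(op), hci_opcode_ocf(op));
	return 0;
}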
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f462d83..57123ee 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1,7 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, The Linux Foundation. All rights reserved.
- Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -27,10 +26,9 @@
#define __HCI_CORE_H
#include <net/bluetooth/hci.h>
-#include <linux/wakelock.h>
-/* HCI upper protocols */
-#define HCI_PROTO_L2CAP 0
-#define HCI_PROTO_SCO 1
+
+/* HCI priority */
+#define HCI_PRIO_MAX 7
/* HCI Core structures */
struct inquiry_data {
@@ -45,30 +43,41 @@
};
struct inquiry_entry {
- struct inquiry_entry *next;
+ struct list_head all; /* inq_cache.all */
+ struct list_head list; /* unknown or resolve */
+ enum {
+ NAME_NOT_KNOWN,
+ NAME_NEEDED,
+ NAME_PENDING,
+ NAME_KNOWN,
+ } name_state;
__u32 timestamp;
struct inquiry_data data;
};
-struct inquiry_cache {
- spinlock_t lock;
+struct discovery_state {
+ int type;
+ enum {
+ DISCOVERY_STOPPED,
+ DISCOVERY_STARTING,
+ DISCOVERY_FINDING,
+ DISCOVERY_RESOLVING,
+ DISCOVERY_STOPPING,
+ } state;
+ struct list_head all; /* All devices found during inquiry */
+ struct list_head unknown; /* Name state not known */
+ struct list_head resolve; /* Name needs to be resolved */
__u32 timestamp;
- struct inquiry_entry *list;
};
struct hci_conn_hash {
struct list_head list;
- spinlock_t lock;
unsigned int acl_num;
+ unsigned int amp_num;
unsigned int sco_num;
unsigned int le_num;
};
-struct hci_chan_list {
- struct list_head list;
- spinlock_t lock;
-};
-
struct bdaddr_list {
struct list_head list;
bdaddr_t bdaddr;
@@ -77,40 +86,28 @@
struct bt_uuid {
struct list_head list;
u8 uuid[16];
+ u8 size;
u8 svc_hint;
};
-struct key_master_id {
+struct smp_ltk {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 authenticated;
+ u8 type;
+ u8 enc_size;
__le16 ediv;
u8 rand[8];
-} __packed;
-
-#define KEY_TYPE_LE_BASE 0x11
-#define KEY_TYPE_LTK 0x11
-#define KEY_TYPE_IRK 0x12
-#define KEY_TYPE_CSRK 0x13
-
-struct link_key_data {
- bdaddr_t bdaddr;
- u8 addr_type;
- u8 key_type;
u8 val[16];
- u8 pin_len;
- u8 auth;
- u8 dlen;
- u8 data[0];
} __packed;
struct link_key {
struct list_head list;
bdaddr_t bdaddr;
- u8 addr_type;
- u8 key_type;
- u8 val[16];
+ u8 type;
+ u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
- u8 auth;
- u8 dlen;
- u8 data[0];
};
struct oob_data {
@@ -120,18 +117,29 @@
u8 randomizer[16];
};
-struct adv_entry {
- struct list_head list;
- bdaddr_t bdaddr;
- u8 bdaddr_type;
- u8 flags;
+struct le_scan_params {
+ u8 type;
+ u16 interval;
+ u16 window;
+ int timeout;
};
+#define HCI_MAX_SHORT_NAME_LENGTH 10
+
+struct amp_assoc {
+ __u16 len;
+ __u16 offset;
+ __u16 rem_len;
+ __u16 len_so_far;
+ __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
+};
+
+#define HCI_MAX_PAGES 3
+
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
- spinlock_t lock;
- atomic_t refcnt;
+ struct mutex lock;
char name[8];
unsigned long flags;
@@ -140,20 +148,33 @@
__u8 dev_type;
bdaddr_t bdaddr;
__u8 dev_name[HCI_MAX_NAME_LENGTH];
+ __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
- __u8 features[8];
+ __u8 max_page;
+ __u8 features[HCI_MAX_PAGES][8];
+ __u8 le_features[8];
+ __u8 le_white_list_size;
+ __u8 le_states[8];
__u8 commands[64];
- __u8 ssp_mode;
__u8 hci_ver;
__u16 hci_rev;
__u8 lmp_ver;
__u16 manufacturer;
- __le16 lmp_subver;
+ __u16 lmp_subver;
__u16 voice_setting;
__u8 io_capability;
+ __s8 inq_tx_power;
+ __u16 page_scan_interval;
+ __u16 page_scan_window;
+ __u8 page_scan_type;
+
+ __u16 devid_source;
+ __u16 devid_vendor;
+ __u16 devid_product;
+ __u16 devid_version;
__u16 pkt_type;
__u16 esco_type;
@@ -175,7 +196,11 @@
__u32 amp_max_flush_to;
__u32 amp_be_flush_to;
- __s8 is_wbs;
+ struct amp_assoc loc_assoc;
+
+ __u8 flow_ctl_mode;
+
+ unsigned int auto_accept_delay;
unsigned long quirks;
@@ -184,37 +209,44 @@
unsigned int sco_cnt;
unsigned int le_cnt;
- __u8 flow_ctl_mode;
-
unsigned int acl_mtu;
unsigned int sco_mtu;
unsigned int le_mtu;
unsigned int acl_pkts;
unsigned int sco_pkts;
unsigned int le_pkts;
- unsigned int le_white_list_size;
- unsigned int data_block_len;
+ __u16 block_len;
+ __u16 block_mtu;
+ __u16 num_blocks;
+ __u16 block_cnt;
unsigned long acl_last_tx;
unsigned long sco_last_tx;
unsigned long le_last_tx;
struct workqueue_struct *workqueue;
+ struct workqueue_struct *req_workqueue;
struct work_struct power_on;
- struct work_struct power_off;
- struct timer_list off_timer;
+ struct delayed_work power_off;
+
+ __u16 discov_timeout;
+ struct delayed_work discov_off;
+
+ struct delayed_work service_cache;
struct timer_list cmd_timer;
- struct tasklet_struct cmd_task;
- struct tasklet_struct rx_task;
- struct tasklet_struct tx_task;
+
+ struct work_struct rx_work;
+ struct work_struct cmd_work;
+ struct work_struct tx_work;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
+ struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
struct sk_buff *reassembly[NUM_REASSEMBLY];
@@ -223,143 +255,107 @@
__u32 req_status;
__u32 req_result;
- __u16 init_last_cmd;
+ struct list_head mgmt_pending;
- struct crypto_blkcipher *tfm;
-
- struct inquiry_cache inq_cache;
+ struct discovery_state discovery;
struct hci_conn_hash conn_hash;
- struct hci_chan_list chan_list;
struct list_head blacklist;
struct list_head uuids;
struct list_head link_keys;
+ struct list_head long_term_keys;
+
struct list_head remote_oob_data;
- struct list_head adv_entries;
- rwlock_t adv_entries_lock;
- struct timer_list adv_timer;
-
- struct timer_list disco_timer;
- struct timer_list disco_le_timer;
- __u8 disco_state;
- int disco_int_phase;
- int disco_int_count;
-
struct hci_dev_stats stat;
- struct sk_buff_head driver_init;
-
- void *driver_data;
- void *core_data;
-
atomic_t promisc;
struct dentry *debugfs;
- struct device *parent;
struct device dev;
struct rfkill *rfkill;
- struct module *owner;
+ unsigned long dev_flags;
+
+ struct delayed_work le_scan_disable;
+
+ struct work_struct le_scan;
+ struct le_scan_params le_scan_params;
+
+ __s8 adv_tx_power;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data_len;
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
+ int (*setup)(struct hci_dev *hdev);
int (*send)(struct sk_buff *skb);
- void (*destruct)(struct hci_dev *hdev);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
+#define HCI_PHY_HANDLE(handle) (handle & 0xff)
+
struct hci_conn {
struct list_head list;
atomic_t refcnt;
- spinlock_t lock;
bdaddr_t dst;
- __u8 dst_id;
- __u8 dst_type;
+ __u8 dst_type;
__u16 handle;
__u16 state;
__u8 mode;
__u8 type;
- __u8 out;
+ bool out;
__u8 attempt;
__u8 dev_class[3];
- __u8 features[8];
- __u8 ssp_mode;
+ __u8 features[HCI_MAX_PAGES][8];
__u16 interval;
__u16 pkt_type;
__u16 link_policy;
__u32 link_mode;
+ __u8 key_type;
__u8 auth_type;
__u8 sec_level;
__u8 pending_sec_level;
__u8 pin_length;
__u8 enc_key_size;
__u8 io_capability;
- __u8 auth_initiator;
- __u8 power_save;
+ __u32 passkey_notify;
+ __u8 passkey_entered;
__u16 disc_timeout;
- __u16 conn_timeout;
- unsigned long pend;
+ unsigned long flags;
__u8 remote_cap;
- __u8 remote_oob;
__u8 remote_auth;
-
- __s8 rssi_threshold;
- __u16 rssi_update_interval;
- __u8 rssi_update_thresh_exceed;
+ __u8 remote_id;
+ bool flush_key;
unsigned int sent;
struct sk_buff_head data_q;
+ struct list_head chan_list;
- struct timer_list disc_timer;
+ struct delayed_work disc_work;
struct timer_list idle_timer;
- struct delayed_work rssi_update_work;
- struct timer_list encrypt_pause_timer;
+ struct timer_list auto_accept_timer;
- struct work_struct work_add;
- struct work_struct work_del;
- struct wake_lock idle_lock;
struct device dev;
- atomic_t devref;
struct hci_dev *hdev;
void *l2cap_data;
void *sco_data;
- void *priv;
-
- __u8 link_key[16];
- __u8 key_type;
+ void *smp_conn;
+ struct amp_mgr *amp_mgr;
struct hci_conn *link;
- /* Low Energy SMP pairing data */
- __u8 oob; /* OOB pairing supported */
- __u8 tk_valid; /* TK value is valid */
- __u8 cfm_pending; /* CONFIRM cmd may be sent */
- __u8 preq[7]; /* Pairing Request */
- __u8 prsp[7]; /* Pairing Response */
- __u8 prnd[16]; /* Pairing Random */
- __u8 pcnf[16]; /* Pairing Confirm */
- __u8 tk[16]; /* Temporary Key */
- __u8 smp_key_size;
- __u8 sec_req;
- __u8 auth;
- void *smp_conn;
- struct timer_list smp_timer;
- __u8 conn_valid;
- __u8 hidp_session_valid;
-
-
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
@@ -367,47 +363,56 @@
struct hci_chan {
struct list_head list;
- struct hci_dev *hdev;
- __u16 state;
- atomic_t refcnt;
- __u16 ll_handle;
- struct hci_ext_fs tx_fs;
- struct hci_ext_fs rx_fs;
- struct hci_conn *conn;
- void *l2cap_sk;
+ __u16 handle;
+ struct hci_conn *conn;
+ struct sk_buff_head data_q;
+ unsigned int sent;
+ __u8 state;
};
-extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
+/* ----- HCI interface to upper protocols ----- */
+extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern int l2cap_disconn_ind(struct hci_conn *hcon);
+extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
+ u16 flags);
+
+extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
-#define INQUIRY_ENTRY_AGE_MAX (HZ*60*60) /* 1 Hour */
+#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
-#define inquiry_cache_lock(c) spin_lock(&c->lock)
-#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
-#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
-#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
-
-static inline void inquiry_cache_init(struct hci_dev *hdev)
+static inline void discovery_init(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
- spin_lock_init(&c->lock);
- c->list = NULL;
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ INIT_LIST_HEAD(&hdev->discovery.all);
+ INIT_LIST_HEAD(&hdev->discovery.unknown);
+ INIT_LIST_HEAD(&hdev->discovery.resolve);
}
+bool hci_discovery_active(struct hci_dev *hdev);
+
+void hci_discovery_set_state(struct hci_dev *hdev, int state);
+
static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
- return c->list == NULL;
+ return list_empty(&hdev->discovery.all);
}
static inline long inquiry_cache_age(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
+ struct discovery_state *c = &hdev->discovery;
return jiffies - c->timestamp;
}
@@ -416,35 +421,51 @@
return jiffies - e->timestamp;
}
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state);
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie);
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known, bool *ssp);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
+ HCI_CONN_REAUTH_PEND,
HCI_CONN_ENCRYPT_PEND,
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
HCI_CONN_SCO_SETUP_PEND,
+ HCI_CONN_LE_SMP_PEND,
+ HCI_CONN_MGMT_CONNECTED,
+ HCI_CONN_SSP_ENABLED,
+ HCI_CONN_POWER_SAVE,
+ HCI_CONN_REMOTE_OOB,
};
-static inline void hci_conn_hash_init(struct hci_dev *hdev)
+static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
- struct hci_conn_hash *h = &hdev->conn_hash;
- INIT_LIST_HEAD(&h->list);
- spin_lock_init(&h->lock);
- h->acl_num = 0;
- h->sco_num = 0;
+ struct hci_dev *hdev = conn->hdev;
+ return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_add(&c->list, &h->list);
+ list_add_rcu(&c->list, &h->list);
switch (c->type) {
case ACL_LINK:
h->acl_num++;
break;
+ case AMP_LINK:
+ h->amp_num++;
+ break;
case LE_LINK:
h->le_num++;
break;
@@ -458,11 +479,17 @@
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_del(&c->list);
+
+ list_del_rcu(&c->list);
+ synchronize_rcu();
+
switch (c->type) {
case ACL_LINK:
h->acl_num--;
break;
+ case AMP_LINK:
+ h->amp_num--;
+ break;
case LE_LINK:
h->le_num--;
break;
@@ -473,55 +500,60 @@
}
}
+static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ switch (type) {
+ case ACL_LINK:
+ return h->acl_num;
+ case AMP_LINK:
+ return h->amp_num;
+ case LE_LINK:
+ return h->le_num;
+ case SCO_LINK:
+ case ESCO_LINK:
+ return h->sco_num;
+ default:
+ return 0;
+ }
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->handle == handle)
- return c;
- }
- return NULL;
-}
+ rcu_read_lock();
-static inline void hci_chan_list_init(struct hci_dev *hdev)
-{
- struct hci_chan_list *h = &hdev->chan_list;
- INIT_LIST_HEAD(&h->list);
- spin_lock_init(&h->lock);
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
__u8 type, bdaddr_t *ba)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == type && !bacmp(&c->dst, ba))
- return c;
- }
- return NULL;
-}
+ rcu_read_lock();
-static inline struct hci_conn *hci_conn_hash_lookup_id(struct hci_dev *hdev,
- bdaddr_t *ba, __u8 id)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
- struct hci_conn *c;
-
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (!bacmp(&c->dst, ba) && (c->dst_id == id))
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
return c;
+ }
}
+
+ rcu_read_unlock();
+
return NULL;
}
@@ -529,166 +561,176 @@
__u8 type, __u16 state)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == type && c->state == state)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state) {
+ rcu_read_unlock();
return c;
+ }
}
+
+ rcu_read_unlock();
+
return NULL;
}
-static inline struct hci_chan *hci_chan_list_lookup_handle(struct hci_dev *hdev,
- __u16 handle)
-{
- struct hci_chan_list *l = &hdev->chan_list;
- struct list_head *p;
- struct hci_chan *c;
-
- list_for_each(p, &l->list) {
- c = list_entry(p, struct hci_chan, list);
- if (c->ll_handle == handle)
- return c;
- }
- return NULL;
-}
-
-static inline struct hci_chan *hci_chan_list_lookup_id(struct hci_dev *hdev,
- __u8 handle)
-{
- struct hci_chan_list *l = &hdev->chan_list;
- struct list_head *p;
- struct hci_chan *c;
-
- list_for_each(p, &l->list) {
- c = list_entry(p, struct hci_chan, list);
- if (c->conn->handle == handle)
- return c;
- }
- return NULL;
-}
-
-void hci_acl_connect(struct hci_conn *conn);
-void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
-void hci_add_sco(struct hci_conn *conn, __u16 handle);
+void hci_disconnect(struct hci_conn *conn, __u8 reason);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
__u16 pkt_type, bdaddr_t *dst);
-struct hci_conn *hci_le_conn_add(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 addr_type);
int hci_conn_del(struct hci_conn *conn);
-void hci_conn_hash_flush(struct hci_dev *hdev, u8 is_process);
+void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
-struct hci_chan *hci_chan_add(struct hci_dev *hdev);
-int hci_chan_del(struct hci_chan *chan);
-static inline void hci_chan_hold(struct hci_chan *chan)
-{
- atomic_inc(&chan->refcnt);
-}
-int hci_chan_put(struct hci_chan *chan);
-
-struct hci_chan *hci_chan_create(struct hci_chan *chan,
- struct hci_ext_fs *tx_fs,
- struct hci_ext_fs *rx_fs);
-void hci_chan_modify(struct hci_chan *chan,
- struct hci_ext_fs *tx_fs,
- struct hci_ext_fs *rx_fs);
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+void hci_chan_del(struct hci_chan *chan);
+void hci_chan_list_flush(struct hci_conn *conn);
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
- __u16 pkt_type, bdaddr_t *dst,
- __u8 sec_level, __u8 auth_type);
-struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type,
- bdaddr_t *dst, __u8 sec_level,
- __u8 auth_type,
- struct bt_le_params *le_params);
-void hci_le_add_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst);
-void hci_le_remove_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst);
-void hci_le_cancel_create_connect(struct hci_dev *hdev, bdaddr_t *dst);
+ __u16 pkt_type, bdaddr_t *dst,
+ __u8 dst_type, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
-void hci_disconnect(struct hci_conn *conn, __u8 reason);
-void hci_disconnect_amp(struct hci_conn *conn, __u8 reason);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_conn_enter_sniff_mode(struct hci_conn *conn);
-void hci_conn_hold_device(struct hci_conn *conn);
-void hci_conn_put_device(struct hci_conn *conn);
+/*
+ * hci_conn_get() and hci_conn_put() are used to control the life-time of an
+ * "hci_conn" object. They do not guarantee that the hci_conn object is running,
+ * working or anything else. They just guarantee that the object is available
+ * and can be dereferenced. So you can use its locks, local variables and any
+ * other constant data.
+ * Before accessing runtime data, you _must_ lock the object and then check that
+ * it is still running. As soon as you release the locks, the connection might
+ * get dropped, though.
+ *
+ * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
+ * how long the underlying connection is held. So every channel that runs on the
+ * hci_conn object calls this to prevent the connection from disappearing. As
+ * long as you hold a device, you must also guarantee that you have a valid
+ * reference to the device via hci_conn_get() (or the initial reference from
+ * hci_conn_add()).
+ * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
+ * break because nobody cares for that. But this means, we cannot use
+ * _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
+ */
-void hci_conn_set_rssi_reporter(struct hci_conn *conn,
- s8 rssi_threshold, u16 interval, u8 updateOnThreshExceed);
-void hci_conn_unset_rssi_reporter(struct hci_conn *conn);
-
-static inline void hci_conn_hold(struct hci_conn *conn)
+static inline void hci_conn_get(struct hci_conn *conn)
{
- atomic_inc(&conn->refcnt);
- del_timer(&conn->disc_timer);
+ get_device(&conn->dev);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
+ put_device(&conn->dev);
+}
+
+static inline void hci_conn_hold(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
+ atomic_inc(&conn->refcnt);
+ cancel_delayed_work(&conn->disc_work);
+}
+
+static inline void hci_conn_drop(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
if (atomic_dec_and_test(&conn->refcnt)) {
unsigned long timeo;
- if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
del_timer(&conn->idle_timer);
if (conn->state == BT_CONNECTED) {
- timeo = msecs_to_jiffies(conn->disc_timeout);
+ timeo = conn->disc_timeout;
if (!conn->out)
- timeo *= 4;
- } else
+ timeo *= 20;
+ } else {
timeo = msecs_to_jiffies(10);
- } else
+ }
+ break;
+
+ case AMP_LINK:
+ timeo = conn->disc_timeout;
+ break;
+
+ default:
timeo = msecs_to_jiffies(10);
- mod_timer(&conn->disc_timer, jiffies + timeo);
+ break;
+ }
+
+ cancel_delayed_work(&conn->disc_work);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->disc_work, timeo);
}
}
/* ----- HCI Devices ----- */
-static inline void __hci_dev_put(struct hci_dev *d)
-{
- if (atomic_dec_and_test(&d->refcnt))
- d->destruct(d);
-}
-
static inline void hci_dev_put(struct hci_dev *d)
{
- __hci_dev_put(d);
- module_put(d->owner);
-}
+ BT_DBG("%s orig refcnt %d", d->name,
+ atomic_read(&d->dev.kobj.kref.refcount));
-static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
-{
- atomic_inc(&d->refcnt);
- return d;
+ put_device(&d->dev);
}
static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
- if (try_module_get(d->owner))
- return __hci_dev_hold(d);
- return NULL;
+ BT_DBG("%s orig refcnt %d", d->name,
+ atomic_read(&d->dev.kobj.kref.refcount));
+
+ get_device(&d->dev);
+ return d;
}
-#define hci_dev_lock(d) spin_lock(&d->lock)
-#define hci_dev_unlock(d) spin_unlock(&d->lock)
-#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
-#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
+#define hci_dev_lock(d) mutex_lock(&d->lock)
+#define hci_dev_unlock(d) mutex_unlock(&d->lock)
+
+#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
+#define to_hci_conn(c) container_of(c, struct hci_conn, dev)
+
+static inline void *hci_get_drvdata(struct hci_dev *hdev)
+{
+ return dev_get_drvdata(&hdev->dev);
+}
+
+static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
+{
+ dev_set_drvdata(&hdev->dev, data);
+}
+
+/* hci_dev_list shall be locked */
+static inline uint8_t __hci_num_ctrl(void)
+{
+ uint8_t count = 0;
+ struct list_head *p;
+
+ list_for_each(p, &hci_dev_list) {
+ count++;
+ }
+
+ return count;
+}
struct hci_dev *hci_dev_get(int index);
-struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
-struct hci_dev *hci_dev_get_type(__u8 amp_type);
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
-int hci_unregister_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
@@ -701,23 +743,28 @@
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
-int hci_set_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int hci_uuids_clear(struct hci_dev *hdev);
int hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
- u8 *key, u8 type, u8 pin_len);
-struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
-struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 type);
-int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, u8 type,
- u8 auth, u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]);
+int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
+ bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
+int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
+ __le16 ediv, u8 rand[8]);
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_remote_oob_data_clear(struct hci_dev *hdev);
@@ -727,86 +774,99 @@
u8 *randomizer);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
-#define ADV_CLEAR_TIMEOUT (3*60*HZ) /* Three minutes */
-int hci_adv_entries_clear(struct hci_dev *hdev);
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev);
-
-void hci_del_off_timer(struct hci_dev *hdev);
-
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
-int hci_register_sysfs(struct hci_dev *hdev);
-void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_init_sysfs(struct hci_dev *hdev);
+int hci_add_sysfs(struct hci_dev *hdev);
+void hci_del_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
-#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
/* ----- LMP capabilities ----- */
-#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
-#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
-#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
-#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
-#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
-#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
-#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH)
-#define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE)
+#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
+#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
+#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
+#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
+#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
+#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
+#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
+#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
+#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
+#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
+#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
+#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
+#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
+#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
+
+/* ----- Extended LMP capabilities ----- */
+#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
+#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
+#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
+
+/* returns true if at least one AMP active */
+static inline bool hci_amp_capable(void)
+{
+ struct hci_dev *hdev;
+ bool ret = false;
+
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(hdev, &hci_dev_list, list)
+ if (hdev->amp_type == HCI_AMP &&
+ test_bit(HCI_UP, &hdev->flags))
+ ret = true;
+ read_unlock(&hci_dev_list_lock);
+
+ return ret;
+}
/* ----- HCI protocols ----- */
-struct hci_proto {
- char *name;
- unsigned int id;
- unsigned long flags;
+#define HCI_PROTO_DEFER 0x01
- void *priv;
-
- int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
- int (*connect_cfm) (struct hci_conn *conn, __u8 status);
- int (*disconn_ind) (struct hci_conn *conn);
- int (*disconn_cfm) (struct hci_conn *conn, __u8 reason,
- __u8 is_process);
- int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
- int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
- int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
- int (*create_cfm) (struct hci_chan *chan, __u8 status);
- int (*modify_cfm) (struct hci_chan *chan, __u8 status);
- int (*destroy_cfm) (struct hci_chan *chan, __u8 status);
-};
-
-static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 type, __u8 *flags)
{
- register struct hci_proto *hp;
- int mask = 0;
+ switch (type) {
+ case ACL_LINK:
+ return l2cap_connect_ind(hdev, bdaddr);
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->connect_ind)
- mask |= hp->connect_ind(hdev, bdaddr, type);
+ case SCO_LINK:
+ case ESCO_LINK:
+ return sco_connect_ind(hdev, bdaddr, flags);
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->connect_ind)
- mask |= hp->connect_ind(hdev, bdaddr, type);
-
- return mask;
+ default:
+ BT_ERR("unknown link type %d", type);
+ return -EINVAL;
+ }
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
- register struct hci_proto *hp;
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ l2cap_connect_cfm(conn, status);
+ break;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->connect_cfm)
- hp->connect_cfm(conn, status);
+ case SCO_LINK:
+ case ESCO_LINK:
+ sco_connect_cfm(conn, status);
+ break;
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->connect_cfm)
- hp->connect_cfm(conn, status);
+ default:
+ BT_ERR("unknown link type %d", conn->type);
+ break;
+ }
if (conn->connect_cfm_cb)
conn->connect_cfm_cb(conn, status);
@@ -814,32 +874,33 @@
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
- register struct hci_proto *hp;
- int reason = 0x13;
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return HCI_ERROR_REMOTE_USER_TERM;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->disconn_ind)
- reason = hp->disconn_ind(conn);
-
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->disconn_ind)
- reason = hp->disconn_ind(conn);
-
- return reason;
+ return l2cap_disconn_ind(conn);
}
-static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason,
- __u8 is_process)
+static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- register struct hci_proto *hp;
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ l2cap_disconn_cfm(conn, reason);
+ break;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->disconn_cfm)
- hp->disconn_cfm(conn, reason, is_process);
+ case SCO_LINK:
+ case ESCO_LINK:
+ sco_disconn_cfm(conn, reason);
+ break;
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->disconn_cfm)
- hp->disconn_cfm(conn, reason, is_process);
+ /* L2CAP would be handled for BREDR chan */
+ case AMP_LINK:
+ break;
+
+ default:
+ BT_ERR("unknown link type %d", conn->type);
+ break;
+ }
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
@@ -847,234 +908,277 @@
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
- register struct hci_proto *hp;
__u8 encrypt;
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return;
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
-
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ l2cap_security_cfm(conn, status, encrypt);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
+ __u8 encrypt)
{
- register struct hci_proto *hp;
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
-
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ l2cap_security_cfm(conn, status, encrypt);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
-static inline void hci_proto_create_cfm(struct hci_chan *chan, __u8 status)
-{
- register struct hci_proto *hp;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->create_cfm)
- hp->create_cfm(chan, status);
-}
-
-static inline void hci_proto_modify_cfm(struct hci_chan *chan, __u8 status)
-{
- register struct hci_proto *hp;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->modify_cfm)
- hp->modify_cfm(chan, status);
-}
-
-static inline void hci_proto_destroy_cfm(struct hci_chan *chan, __u8 status)
-{
- register struct hci_proto *hp;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->destroy_cfm)
- hp->destroy_cfm(chan, status);
-}
-
-int hci_register_proto(struct hci_proto *hproto);
-int hci_unregister_proto(struct hci_proto *hproto);
-
/* ----- HCI callbacks ----- */
struct hci_cb {
struct list_head list;
char *name;
- void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
- struct list_head *p;
+ struct hci_cb *cb;
__u8 encrypt;
hci_proto_auth_cfm(conn, status);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
-static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
+ __u8 encrypt)
{
- struct list_head *p;
+ struct hci_cb *cb;
if (conn->sec_level == BT_SECURITY_SDP)
conn->sec_level = BT_SECURITY_LOW;
- if (!status && encrypt && conn->pending_sec_level > conn->sec_level)
+ if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level;
hci_proto_encrypt_cfm(conn, status, encrypt);
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
- struct list_head *p;
+ struct hci_cb *cb;
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
-static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
+static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ __u8 role)
{
- struct list_head *p;
+ struct hci_cb *cb;
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
+}
+
+static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
+{
+ size_t parsed = 0;
+
+ if (data_len < 2)
+ return false;
+
+ while (parsed < data_len - 1) {
+ u8 field_len = data[0];
+
+ if (field_len == 0)
+ break;
+
+ parsed += field_len + 1;
+
+ if (parsed > data_len)
+ break;
+
+ if (data[1] == type)
+ return true;
+
+ data += field_len + 1;
+ }
+
+ return false;
+}
+
+static inline size_t eir_get_length(u8 *eir, size_t eir_len)
+{
+ size_t parsed = 0;
+
+ while (parsed < eir_len) {
+ u8 field_len = eir[0];
+
+ if (field_len == 0)
+ return parsed;
+
+ parsed += field_len + 1;
+ eir += field_len + 1;
+ }
+
+ return eir_len;
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
}
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-int hci_register_notifier(struct notifier_block *nb);
-int hci_unregister_notifier(struct notifier_block *nb);
+struct hci_request {
+ struct hci_dev *hdev;
+ struct sk_buff_head cmd_q;
-/* AMP Manager event callbacks */
-struct amp_mgr_cb {
- struct list_head list;
- void (*amp_cmd_complete_event) (struct hci_dev *hdev, __u16 opcode,
- struct sk_buff *skb);
- void (*amp_cmd_status_event) (struct hci_dev *hdev, __u16 opcode,
- __u8 status);
- void (*amp_event) (struct hci_dev *hdev, __u8 ev_code,
- struct sk_buff *skb);
+ /* If something goes wrong when building the HCI request, the error
+ * value is stored in this field.
+ */
+ int err;
};
-void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
- struct sk_buff *skb);
-void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status);
-void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
- struct sk_buff *skb);
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param);
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
-int hci_register_amp(struct amp_mgr_cb *acb);
-int hci_unregister_amp(struct amp_mgr_cb *acb);
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
-int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
-void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
- struct sk_buff *skb, __u16 flags);
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
+ const void *param);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
-void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
-
/* ----- HCI Sockets ----- */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
- struct sock *skip_sk);
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event);
/* Management interface */
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(u16 index);
-int mgmt_index_removed(u16 index);
-int mgmt_powered(u16 index, u8 powered);
-int mgmt_discoverable(u16 index, u8 discoverable);
-int mgmt_connectable(u16 index, u8 connectable);
-int mgmt_new_key(u16 index, struct link_key *key, u8 bonded);
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le);
-int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval,
- u16 latency, u16 timeout);
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr, u8 reason);
-int mgmt_disconnect_failed(u16 index);
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr);
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_request(u16 index, u8 event, bdaddr_t *bdaddr,
- __le32 value);
-int mgmt_user_oob_request(u16 index, bdaddr_t *bdaddr);
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
- u8 status);
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
- u8 status);
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 type, u8 le,
- u8 *dev_class, s8 rssi, u8 eir_len, u8 *eir);
-void mgmt_read_rssi_complete(u16 index, s8 rssi, bdaddr_t *bdaddr,
- u16 handle, u8 status);
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 status, u8 *name);
-void mgmt_inquiry_started(u16 index);
-void mgmt_inquiry_complete_evt(u16 index, u8 status);
-void mgmt_disco_timeout(unsigned long data);
-void mgmt_disco_le_timeout(unsigned long data);
-int mgmt_encrypt_change(u16 index, bdaddr_t *bdaddr, u8 status);
+#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
+#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
+ BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
-/* LE SMP Management interface */
-int le_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, void *cp);
-int mgmt_remote_class(u16 index, bdaddr_t *bdaddr, u8 dev_class[3]);
-int mgmt_remote_version(u16 index, bdaddr_t *bdaddr, u8 ver, u16 mnf,
- u16 sub_ver);
-int mgmt_remote_features(u16 index, bdaddr_t *bdaddr, u8 features[8]);
+int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
+int mgmt_index_added(struct hci_dev *hdev);
+int mgmt_index_removed(struct hci_dev *hdev);
+int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+int mgmt_powered(struct hci_dev *hdev, u8 powered);
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent);
+int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class);
+int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason);
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, __le32 value,
+ u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 passkey,
+ u8 entered);
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status);
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status);
+int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+ u8 ssp, u8 *eir, u16 eir_len);
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+int mgmt_interleaved_discovery(struct hci_dev *hdev);
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+bool mgmt_valid_hdev(struct hci_dev *hdev);
+int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -1104,15 +1208,18 @@
#define hci_req_lock(d) mutex_lock(&d->req_lock)
#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
-void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);
+void hci_update_ad(struct hci_request *req);
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
u16 latency, u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
__u8 ltk[16]);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
-void hci_le_ltk_neg_reply(struct hci_conn *conn);
+int hci_do_inquiry(struct hci_dev *hdev, u8 length);
+int hci_cancel_inquiry(struct hci_dev *hdev);
+int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
+ int timeout);
+int hci_cancel_le_scan(struct hci_dev *hdev);
-void hci_read_rssi(struct hci_conn *conn);
+u8 bdaddr_to_le(u8 bdaddr_type);
#endif /* __HCI_CORE_H */
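The eir_append_data() helper added above builds EIR/advertising data as length/type/value fields. The sketch below reproduces its body verbatim in a stand-alone program to show the resulting layout; the typedefs, the 240-byte buffer size and the 0x09 ("Complete Local Name") type value are assumptions made for the example, not something this patch defines.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t u8;
typedef uint16_t u16;

/* Body copied from the static inline added to hci_core.h above. */
static u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

int main(void)
{
	u8 eir[240] = { 0 };	/* EIR-sized scratch buffer (assumed 240 bytes) */
	u16 len = 0;

	/* Append one field: type 0x09 (Complete Local Name), 8-byte payload. */
	len = eir_append_data(eir, len, 0x09, (u8 *) "bt-mgr13", 8);

	/* Prints: eir_len=10 field_len=9 type=0x09 */
	printf("eir_len=%u field_len=%u type=0x%02x\n",
	       (unsigned) len, (unsigned) eir[0], (unsigned) eir[1]);
	return 0;
}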
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
new file mode 100644
index 0000000..77d1e57
--- /dev/null
+++ b/include/net/bluetooth/hci_mon.h
@@ -0,0 +1,51 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_MON_H
+#define __HCI_MON_H
+
+struct hci_mon_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+#define HCI_MON_HDR_SIZE 6
+
+#define HCI_MON_NEW_INDEX 0
+#define HCI_MON_DEL_INDEX 1
+#define HCI_MON_COMMAND_PKT 2
+#define HCI_MON_EVENT_PKT 3
+#define HCI_MON_ACL_TX_PKT 4
+#define HCI_MON_ACL_RX_PKT 5
+#define HCI_MON_SCO_TX_PKT 6
+#define HCI_MON_SCO_RX_PKT 7
+
+struct hci_mon_new_index {
+ __u8 type;
+ __u8 bus;
+ bdaddr_t bdaddr;
+ char name[8];
+} __packed;
+#define HCI_MON_NEW_INDEX_SIZE 16
+
+#endif /* __HCI_MON_H */
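The new monitor channel (HCI_CHANNEL_MONITOR above) frames every packet with this 6-byte little-endian header. A small sketch of how such a header could be laid out on the wire follows; put_le16() and the chosen opcode/index/len values are illustrative assumptions, not part of hci_mon.h.

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value in little-endian order, matching the __le16 fields. */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t hdr[6];			/* HCI_MON_HDR_SIZE */
	int i;

	put_le16(hdr + 0, 3);		/* opcode: HCI_MON_EVENT_PKT */
	put_le16(hdr + 2, 0);		/* index:  controller 0 (hci0) */
	put_le16(hdr + 4, 7);		/* len:    bytes of payload that follow */

	/* Prints: 03 00 00 00 07 00 */
	for (i = 0; i < 6; i++)
		printf("%02x ", (unsigned) hdr[i]);
	printf("\n");
	return 0;
}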
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index b1ee41a..fb94cf1 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010-2012 The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
@@ -27,39 +27,43 @@
#ifndef __L2CAP_H
#define __L2CAP_H
+#include <asm/unaligned.h>
+
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
#define L2CAP_DEFAULT_MIN_MTU 48
-#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xffff
-#define L2CAP_DEFAULT_FLUSH_TO 0xffff
-#define L2CAP_MAX_FLUSH_TO 0x7ff
+#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
+#define L2CAP_EFS_DEFAULT_FLUSH_TO 0xFFFFFFFF
#define L2CAP_DEFAULT_TX_WINDOW 63
+#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
#define L2CAP_DEFAULT_MAX_TX 3
#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
-#define L2CAP_DEFAULT_MAX_PDU_SIZE 1482 /* Sized for AMP or BR/EDR */
+#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */
#define L2CAP_DEFAULT_ACK_TO 200
+#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
-#define L2CAP_MAX_ERTM_QUEUED 5
-#define L2CAP_MIN_ERTM_QUEUED 2
+#define L2CAP_LE_MIN_MTU 23
+
+#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
+#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_ENC_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
+#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
#define L2CAP_A2MP_DEFAULT_MTU 670
-#define L2CAP_TX_WIN_MAX_ENHANCED 0x3f
-#define L2CAP_TX_WIN_MAX_EXTENDED 0x3fff
-#define L2CAP_LE_DEFAULT_MTU 23
-
-#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
-#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
-#define L2CAP_MOVE_TIMEOUT (4*HZ) /* 4 seconds */
-#define L2CAP_MOVE_ERTX_TIMEOUT (60*HZ) /* 60 seconds */
-
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
__le16 l2_psm;
bdaddr_t l2_bdaddr;
__le16 l2_cid;
+ __u8 l2_bdaddr_type;
};
/* L2CAP socket options */
@@ -87,35 +91,36 @@
#define L2CAP_LM_TRUSTED 0x0008
#define L2CAP_LM_RELIABLE 0x0010
#define L2CAP_LM_SECURE 0x0020
-#define L2CAP_LM_FLUSHABLE 0x0040
/* L2CAP command codes */
-#define L2CAP_COMMAND_REJ 0x01
-#define L2CAP_CONN_REQ 0x02
-#define L2CAP_CONN_RSP 0x03
-#define L2CAP_CONF_REQ 0x04
-#define L2CAP_CONF_RSP 0x05
-#define L2CAP_DISCONN_REQ 0x06
-#define L2CAP_DISCONN_RSP 0x07
-#define L2CAP_ECHO_REQ 0x08
-#define L2CAP_ECHO_RSP 0x09
-#define L2CAP_INFO_REQ 0x0a
-#define L2CAP_INFO_RSP 0x0b
+#define L2CAP_COMMAND_REJ 0x01
+#define L2CAP_CONN_REQ 0x02
+#define L2CAP_CONN_RSP 0x03
+#define L2CAP_CONF_REQ 0x04
+#define L2CAP_CONF_RSP 0x05
+#define L2CAP_DISCONN_REQ 0x06
+#define L2CAP_DISCONN_RSP 0x07
+#define L2CAP_ECHO_REQ 0x08
+#define L2CAP_ECHO_RSP 0x09
+#define L2CAP_INFO_REQ 0x0a
+#define L2CAP_INFO_RSP 0x0b
#define L2CAP_CREATE_CHAN_REQ 0x0c
#define L2CAP_CREATE_CHAN_RSP 0x0d
-#define L2CAP_MOVE_CHAN_REQ 0x0e
-#define L2CAP_MOVE_CHAN_RSP 0x0f
-#define L2CAP_MOVE_CHAN_CFM 0x10
+#define L2CAP_MOVE_CHAN_REQ 0x0e
+#define L2CAP_MOVE_CHAN_RSP 0x0f
+#define L2CAP_MOVE_CHAN_CFM 0x10
#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
-/* L2CAP feature mask */
+/* L2CAP extended feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
#define L2CAP_FEAT_RETRANS 0x00000002
+#define L2CAP_FEAT_BIDIR_QOS 0x00000004
#define L2CAP_FEAT_ERTM 0x00000008
#define L2CAP_FEAT_STREAMING 0x00000010
#define L2CAP_FEAT_FCS 0x00000020
+#define L2CAP_FEAT_EXT_FLOW 0x00000040
#define L2CAP_FEAT_FIXED_CHAN 0x00000080
#define L2CAP_FEAT_EXT_WINDOW 0x00000100
#define L2CAP_FEAT_UCD 0x00000200
@@ -128,52 +133,57 @@
#define L2CAP_FC_L2CAP 0x02
#define L2CAP_FC_A2MP 0x08
-/* L2CAP Control Field */
-#define L2CAP_CTRL_SAR 0xC000
-#define L2CAP_CTRL_REQSEQ 0x3F00
-#define L2CAP_CTRL_TXSEQ 0x007E
-#define L2CAP_CTRL_FINAL 0x0080
-#define L2CAP_CTRL_POLL 0x0010
-#define L2CAP_CTRL_SUPERVISE 0x000C
-#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
+/* L2CAP Control Field bit masks */
+#define L2CAP_CTRL_SAR 0xC000
+#define L2CAP_CTRL_REQSEQ 0x3F00
+#define L2CAP_CTRL_TXSEQ 0x007E
+#define L2CAP_CTRL_SUPERVISE 0x000C
-#define L2CAP_CTRL_TXSEQ_SHIFT 1
-#define L2CAP_CTRL_SUPERVISE_SHIFT 2
-#define L2CAP_CTRL_POLL_SHIFT 4
-#define L2CAP_CTRL_FINAL_SHIFT 7
-#define L2CAP_CTRL_REQSEQ_SHIFT 8
-#define L2CAP_CTRL_SAR_SHIFT 14
+#define L2CAP_CTRL_RETRANS 0x0080
+#define L2CAP_CTRL_FINAL 0x0080
+#define L2CAP_CTRL_POLL 0x0010
+#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
-#define L2CAP_EXT_CTRL_SAR 0x00030000
-#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
-#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
-#define L2CAP_EXT_CTRL_FINAL 0x00000002
-#define L2CAP_EXT_CTRL_POLL 0x00040000
-#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
-#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+#define L2CAP_CTRL_TXSEQ_SHIFT 1
+#define L2CAP_CTRL_SUPER_SHIFT 2
+#define L2CAP_CTRL_POLL_SHIFT 4
+#define L2CAP_CTRL_FINAL_SHIFT 7
+#define L2CAP_CTRL_REQSEQ_SHIFT 8
+#define L2CAP_CTRL_SAR_SHIFT 14
-#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
-#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
-#define L2CAP_EXT_CTRL_SAR_SHIFT 16
-#define L2CAP_EXT_CTRL_SUPERVISE_SHIFT 16
-#define L2CAP_EXT_CTRL_POLL_SHIFT 18
-#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR 0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
-/* L2CAP Supervisory Frame Types */
-#define L2CAP_SFRAME_RR 0x00
-#define L2CAP_SFRAME_REJ 0x01
-#define L2CAP_SFRAME_RNR 0x02
-#define L2CAP_SFRAME_SREJ 0x03
+#define L2CAP_EXT_CTRL_POLL 0x00040000
+#define L2CAP_EXT_CTRL_FINAL 0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
+#define L2CAP_EXT_CTRL_SAR_SHIFT 16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
+#define L2CAP_EXT_CTRL_POLL_SHIFT 18
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
+
+/* L2CAP Supervisory Function */
+#define L2CAP_SUPER_RR 0x00
+#define L2CAP_SUPER_REJ 0x01
+#define L2CAP_SUPER_RNR 0x02
+#define L2CAP_SUPER_SREJ 0x03
/* L2CAP Segmentation and Reassembly */
-#define L2CAP_SAR_UNSEGMENTED 0x00
-#define L2CAP_SAR_START 0x01
-#define L2CAP_SAR_END 0x02
-#define L2CAP_SAR_CONTINUE 0x03
+#define L2CAP_SAR_UNSEGMENTED 0x00
+#define L2CAP_SAR_START 0x01
+#define L2CAP_SAR_END 0x02
+#define L2CAP_SAR_CONTINUE 0x03
-/* L2CAP ERTM / Streaming extra field lengths */
-#define L2CAP_SDULEN_SIZE 2
-#define L2CAP_FCS_SIZE 2
+/* L2CAP Command rej. reasons */
+#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
+#define L2CAP_REJ_MTU_EXCEEDED 0x0001
+#define L2CAP_REJ_INVALID_CID 0x0002
/* L2CAP structures */
struct l2cap_hdr {
@@ -181,8 +191,14 @@
__le16 cid;
} __packed;
#define L2CAP_HDR_SIZE 4
-#define L2CAP_ENHANCED_HDR_SIZE 6
-#define L2CAP_EXTENDED_HDR_SIZE 8
+#define L2CAP_ENH_HDR_SIZE 6
+#define L2CAP_EXT_HDR_SIZE 8
+
+#define L2CAP_FCS_SIZE 2
+#define L2CAP_SDULEN_SIZE 2
+#define L2CAP_PSMLEN_SIZE 2
+#define L2CAP_ENH_CTRL_SIZE 2
+#define L2CAP_EXT_CTRL_SIZE 4
struct l2cap_cmd_hdr {
__u8 code;
@@ -191,10 +207,21 @@
} __packed;
#define L2CAP_CMD_HDR_SIZE 4
-struct l2cap_cmd_rej {
+struct l2cap_cmd_rej_unk {
__le16 reason;
} __packed;
+struct l2cap_cmd_rej_mtu {
+ __le16 reason;
+ __le16 max_mtu;
+} __packed;
+
+struct l2cap_cmd_rej_cid {
+ __le16 reason;
+ __le16 scid;
+ __le16 dcid;
+} __packed;
+
struct l2cap_conn_req {
__le16 psm;
__le16 scid;
@@ -207,6 +234,10 @@
__le16 status;
} __packed;
+/* protocol/service multiplexer (PSM) */
+#define L2CAP_PSM_SDP 0x0001
+#define L2CAP_PSM_RFCOMM 0x0003
+
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
@@ -217,14 +248,15 @@
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
-/* connect result */
+/* connect/create channel results */
#define L2CAP_CR_SUCCESS 0x0000
#define L2CAP_CR_PEND 0x0001
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
+#define L2CAP_CR_BAD_AMP 0x0005
-/* connect status */
+/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
#define L2CAP_CS_AUTHEN_PEND 0x0001
#define L2CAP_CS_AUTHOR_PEND 0x0002
@@ -247,7 +279,10 @@
#define L2CAP_CONF_REJECT 0x0002
#define L2CAP_CONF_UNKNOWN 0x0003
#define L2CAP_CONF_PENDING 0x0004
-#define L2CAP_CONF_FLOW_SPEC_REJECT 0x0005
+#define L2CAP_CONF_EFS_REJECT 0x0005
+
+/* configuration req/rsp continuation flag */
+#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
struct l2cap_conf_opt {
__u8 type;
@@ -264,13 +299,8 @@
#define L2CAP_CONF_QOS 0x03
#define L2CAP_CONF_RFC 0x04
#define L2CAP_CONF_FCS 0x05
-#define L2CAP_CONF_EXT_FS 0x06
-#define L2CAP_CONF_EXT_WINDOW 0x07
-
-/* QOS Service type */
-#define L2CAP_SERVICE_NO_TRAFFIC 0x00
-#define L2CAP_SERVICE_BEST_EFFORT 0x01
-#define L2CAP_SERVICE_GUARANTEED 0x02
+#define L2CAP_CONF_EFS 0x06
+#define L2CAP_CONF_EWS 0x07
#define L2CAP_CONF_MAX_SIZE 22
@@ -283,26 +313,27 @@
__le16 max_pdu_size;
} __packed;
-struct l2cap_conf_ext_fs {
- __u8 id;
- __u8 type;
- __le16 max_sdu;
- __le32 sdu_arr_time;
- __le32 acc_latency;
- __le32 flush_to;
-} __packed;
-
-struct l2cap_conf_prm {
- __u8 fcs;
- __le32 flush_to;
-};
-
#define L2CAP_MODE_BASIC 0x00
#define L2CAP_MODE_RETRANS 0x01
#define L2CAP_MODE_FLOWCTL 0x02
#define L2CAP_MODE_ERTM 0x03
#define L2CAP_MODE_STREAMING 0x04
+struct l2cap_conf_efs {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC 0x00
+#define L2CAP_SERV_BESTEFFORT 0x01
+#define L2CAP_SERV_GUARANTEED 0x02
+
+#define L2CAP_BESTEFFORT_ID 0x01
+
struct l2cap_disconn_req {
__le16 dcid;
__le16 scid;
@@ -327,83 +358,53 @@
__le16 psm;
__le16 scid;
__u8 amp_id;
-} __attribute__ ((packed));
+} __packed;
struct l2cap_create_chan_rsp {
__le16 dcid;
__le16 scid;
__le16 result;
__le16 status;
-} __attribute__ ((packed));
-
-#define L2CAP_CREATE_CHAN_SUCCESS (0x0000)
-#define L2CAP_CREATE_CHAN_PENDING (0x0001)
-#define L2CAP_CREATE_CHAN_REFUSED_PSM (0x0002)
-#define L2CAP_CREATE_CHAN_REFUSED_SECURITY (0x0003)
-#define L2CAP_CREATE_CHAN_REFUSED_RESOURCES (0x0004)
-#define L2CAP_CREATE_CHAN_REFUSED_CONTROLLER (0x0005)
-
-#define L2CAP_CREATE_CHAN_STATUS_NONE (0x0000)
-#define L2CAP_CREATE_CHAN_STATUS_AUTHENTICATION (0x0001)
-#define L2CAP_CREATE_CHAN_STATUS_AUTHORIZATION (0x0002)
+} __packed;
struct l2cap_move_chan_req {
__le16 icid;
__u8 dest_amp_id;
-} __attribute__ ((packed));
+} __packed;
struct l2cap_move_chan_rsp {
__le16 icid;
__le16 result;
-} __attribute__ ((packed));
+} __packed;
-#define L2CAP_MOVE_CHAN_SUCCESS (0x0000)
-#define L2CAP_MOVE_CHAN_PENDING (0x0001)
-#define L2CAP_MOVE_CHAN_REFUSED_CONTROLLER (0x0002)
-#define L2CAP_MOVE_CHAN_REFUSED_SAME_ID (0x0003)
-#define L2CAP_MOVE_CHAN_REFUSED_CONFIG (0x0004)
-#define L2CAP_MOVE_CHAN_REFUSED_COLLISION (0x0005)
-#define L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED (0x0006)
+#define L2CAP_MR_SUCCESS 0x0000
+#define L2CAP_MR_PEND 0x0001
+#define L2CAP_MR_BAD_ID 0x0002
+#define L2CAP_MR_SAME_ID 0x0003
+#define L2CAP_MR_NOT_SUPP 0x0004
+#define L2CAP_MR_COLLISION 0x0005
+#define L2CAP_MR_NOT_ALLOWED 0x0006
struct l2cap_move_chan_cfm {
__le16 icid;
__le16 result;
-} __attribute__ ((packed));
+} __packed;
-#define L2CAP_MOVE_CHAN_CONFIRMED (0x0000)
-#define L2CAP_MOVE_CHAN_UNCONFIRMED (0x0001)
+#define L2CAP_MC_CONFIRMED 0x0000
+#define L2CAP_MC_UNCONFIRMED 0x0001
struct l2cap_move_chan_cfm_rsp {
__le16 icid;
-} __attribute__ ((packed));
-
-struct l2cap_amp_signal_work {
- struct work_struct work;
- struct l2cap_cmd_hdr cmd;
- struct l2cap_conn *conn;
- struct sk_buff *skb;
- u8 *data;
-};
-
-struct l2cap_resegment_work {
- struct work_struct work;
- struct sock *sk;
-};
-
-struct l2cap_logical_link_work {
- struct work_struct work;
- struct hci_chan *chan;
- u8 status;
-};
+} __packed;
/* info type */
-#define L2CAP_IT_CL_MTU 0x0001
-#define L2CAP_IT_FEAT_MASK 0x0002
-#define L2CAP_IT_FIXED_CHAN 0x0003
+#define L2CAP_IT_CL_MTU 0x0001
+#define L2CAP_IT_FEAT_MASK 0x0002
+#define L2CAP_IT_FIXED_CHAN 0x0003
/* info result */
-#define L2CAP_IR_SUCCESS 0x0000
-#define L2CAP_IR_NOTSUPP 0x0001
+#define L2CAP_IR_SUCCESS 0x0000
+#define L2CAP_IR_NOTSUPP 0x0001
struct l2cap_conn_param_update_req {
__le16 min;
@@ -420,64 +421,27 @@
#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
#define L2CAP_CONN_PARAM_REJECTED 0x0001
-/* ----- L2CAP connections ----- */
-struct l2cap_chan_list {
- struct sock *head;
- rwlock_t lock;
-};
-
-struct l2cap_conn {
- struct hci_conn *hcon;
-
- bdaddr_t *dst;
- bdaddr_t *src;
-
- unsigned int mtu;
-
- __u32 feat_mask;
- __u8 fc_mask;
- struct amp_mgr *mgr;
-
- __u8 info_state;
- __u8 info_ident;
-
- struct timer_list info_timer;
-
- spinlock_t lock;
-
- struct sk_buff *rx_skb;
- __u32 rx_len;
- __u8 tx_ident;
-
- __u8 disc_reason;
-
- struct l2cap_chan_list chan_list;
-};
-
-struct sock_del_list {
- struct sock *sk;
- struct list_head list;
-};
-
-#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
-#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
-#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
-
-/* ----- L2CAP channel and socket info ----- */
-#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
-#define TX_QUEUE(sk) (&l2cap_pi(sk)->tx_queue)
-#define SREJ_QUEUE(sk) (&l2cap_pi(sk)->srej_queue)
-
+/* ----- L2CAP channels and connections ----- */
struct l2cap_seq_list {
- __u16 head;
- __u16 tail;
- __u16 size;
- __u16 mask;
- __u16 *list;
+ __u16 head;
+ __u16 tail;
+ __u16 mask;
+ __u16 *list;
};
-struct l2cap_pinfo {
- struct bt_sock bt;
+#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
+#define L2CAP_SEQ_LIST_TAIL 0x8000
+
+struct l2cap_chan {
+ struct sock *sk;
+
+ struct l2cap_conn *conn;
+ struct hci_conn *hs_hcon;
+ struct hci_chan *hs_hchan;
+ struct kref kref;
+
+ __u8 state;
+
__le16 psm;
__u16 dcid;
__u16 scid;
@@ -486,34 +450,42 @@
__u16 omtu;
__u16 flush_to;
__u8 mode;
- __u8 fixed_channel;
- __u8 num_conf_req;
- __u8 num_conf_rsp;
- __u8 incoming;
+ __u8 chan_type;
+ __u8 chan_policy;
- __u8 fcs;
+ __le16 sport;
+
__u8 sec_level;
- __u8 role_switch;
- __u8 force_reliable;
- __u8 flushable;
- __u8 force_active;
+
+ __u8 ident;
__u8 conf_req[64];
__u8 conf_len;
- __u8 conf_ident;
- __u16 conf_state;
- __u8 conn_state;
+ __u8 num_conf_req;
+ __u8 num_conf_rsp;
+
+ __u8 fcs;
+
+ __u16 tx_win;
+ __u16 tx_win_max;
+ __u16 ack_win;
+ __u8 max_tx;
+ __u16 retrans_timeout;
+ __u16 monitor_timeout;
+ __u16 mps;
+
__u8 tx_state;
__u8 rx_state;
- __u8 reconf_state;
- __u8 amp_id;
- __u8 amp_move_id;
- __u8 amp_move_state;
- __u8 amp_move_role;
- __u8 amp_move_cmd_ident;
- __u16 amp_move_reqseq;
- __u16 amp_move_event;
+ unsigned long conf_state;
+ unsigned long conn_state;
+ unsigned long flags;
+
+ __u8 remote_amp_id;
+ __u8 local_amp_id;
+ __u8 move_id;
+ __u8 move_state;
+ __u8 move_role;
__u16 next_tx_seq;
__u16 expected_ack_seq;
@@ -521,203 +493,338 @@
__u16 buffer_seq;
__u16 srej_save_reqseq;
__u16 last_acked_seq;
- __u32 frames_sent;
+ __u16 frames_sent;
__u16 unacked_frames;
__u8 retry_count;
- __u16 srej_queue_next;
__u16 sdu_len;
struct sk_buff *sdu;
struct sk_buff *sdu_last_frag;
- atomic_t ertm_queued;
- __u8 ident;
-
- __u16 tx_win;
- __u16 tx_win_max;
- __u16 ack_win;
- __u8 max_tx;
- __u8 amp_pref;
__u16 remote_tx_win;
__u8 remote_max_tx;
- __u8 extended_control;
- __u16 retrans_timeout;
- __u16 monitor_timeout;
__u16 remote_mps;
- __u16 mps;
- __le16 sport;
+ __u8 local_id;
+ __u8 local_stype;
+ __u16 local_msdu;
+ __u32 local_sdu_itime;
+ __u32 local_acc_lat;
+ __u32 local_flush_to;
- struct delayed_work retrans_work;
- struct delayed_work monitor_work;
- struct delayed_work ack_work;
- struct work_struct tx_work;
- struct sk_buff_head tx_queue;
- struct sk_buff_head srej_queue;
- struct l2cap_seq_list srej_list;
- struct l2cap_seq_list retrans_list;
- struct hci_conn *ampcon;
- struct hci_chan *ampchan;
- struct l2cap_conn *conn;
- struct l2cap_conf_prm local_conf;
- struct l2cap_conf_prm remote_conf;
- struct l2cap_conf_ext_fs local_fs;
- struct l2cap_conf_ext_fs remote_fs;
- struct sock *next_c;
- struct sock *prev_c;
+ __u8 remote_id;
+ __u8 remote_stype;
+ __u16 remote_msdu;
+ __u32 remote_sdu_itime;
+ __u32 remote_acc_lat;
+ __u32 remote_flush_to;
+
+ struct delayed_work chan_timer;
+ struct delayed_work retrans_timer;
+ struct delayed_work monitor_timer;
+ struct delayed_work ack_timer;
+
+ struct sk_buff *tx_send_head;
+ struct sk_buff_head tx_q;
+ struct sk_buff_head srej_q;
+ struct l2cap_seq_list srej_list;
+ struct l2cap_seq_list retrans_list;
+
+ struct list_head list;
+ struct list_head global_l;
+
+ void *data;
+ struct l2cap_ops *ops;
+ struct mutex lock;
};
-#define L2CAP_CONF_REQ_SENT 0x0001
-#define L2CAP_CONF_INPUT_DONE 0x0002
-#define L2CAP_CONF_OUTPUT_DONE 0x0004
-#define L2CAP_CONF_MTU_DONE 0x0008
-#define L2CAP_CONF_MODE_DONE 0x0010
-#define L2CAP_CONF_CONNECT_PEND 0x0020
-#define L2CAP_CONF_NO_FCS_RECV 0x0040
-#define L2CAP_CONF_STATE2_DEVICE 0x0080
-#define L2CAP_CONF_EXT_WIN_RECV 0x0100
-#define L2CAP_CONF_LOCKSTEP 0x0200
-#define L2CAP_CONF_LOCKSTEP_PEND 0x0400
-#define L2CAP_CONF_PEND_SENT 0x0800
-#define L2CAP_CONF_EFS_RECV 0x1000
+struct l2cap_ops {
+ char *name;
+
+ struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
+ int (*recv) (struct l2cap_chan *chan,
+ struct sk_buff *skb);
+ void (*teardown) (struct l2cap_chan *chan, int err);
+ void (*close) (struct l2cap_chan *chan);
+ void (*state_change) (struct l2cap_chan *chan,
+ int state);
+ void (*ready) (struct l2cap_chan *chan);
+ void (*defer) (struct l2cap_chan *chan);
+ struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
+ unsigned long len, int nb);
+};
+
+struct l2cap_conn {
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
+
+ bdaddr_t *dst;
+ bdaddr_t *src;
+
+ unsigned int mtu;
+
+ __u32 feat_mask;
+ __u8 fixed_chan_mask;
+
+ __u8 info_state;
+ __u8 info_ident;
+
+ struct delayed_work info_timer;
+
+ spinlock_t lock;
+
+ struct sk_buff *rx_skb;
+ __u32 rx_len;
+ __u8 tx_ident;
+
+ __u8 disc_reason;
+
+ struct delayed_work security_timer;
+ struct smp_chan *smp_chan;
+
+ struct list_head chan_l;
+ struct mutex chan_lock;
+ struct kref ref;
+ struct list_head users;
+};
+
+struct l2cap_user {
+ struct list_head list;
+ int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
+ void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
+};
+
+#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
+#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
+#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
+
+#define L2CAP_CHAN_RAW 1
+#define L2CAP_CHAN_CONN_LESS 2
+#define L2CAP_CHAN_CONN_ORIENTED 3
+#define L2CAP_CHAN_CONN_FIX_A2MP 4
+
+/* ----- L2CAP socket info ----- */
+#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+
+struct l2cap_pinfo {
+ struct bt_sock bt;
+ struct l2cap_chan *chan;
+ struct sk_buff *rx_busy_skb;
+};
+
+enum {
+ CONF_REQ_SENT,
+ CONF_INPUT_DONE,
+ CONF_OUTPUT_DONE,
+ CONF_MTU_DONE,
+ CONF_MODE_DONE,
+ CONF_CONNECT_PEND,
+ CONF_RECV_NO_FCS,
+ CONF_STATE2_DEVICE,
+ CONF_EWS_RECV,
+ CONF_LOC_CONF_PEND,
+ CONF_REM_CONF_PEND,
+ CONF_NOT_COMPLETE,
+};
#define L2CAP_CONF_MAX_CONF_REQ 2
#define L2CAP_CONF_MAX_CONF_RSP 2
-#define L2CAP_RECONF_NONE 0x00
-#define L2CAP_RECONF_INT 0x01
-#define L2CAP_RECONF_ACC 0x02
+enum {
+ CONN_SREJ_SENT,
+ CONN_WAIT_F,
+ CONN_SREJ_ACT,
+ CONN_SEND_PBIT,
+ CONN_REMOTE_BUSY,
+ CONN_LOCAL_BUSY,
+ CONN_REJ_ACT,
+ CONN_SEND_FBIT,
+ CONN_RNR_SENT,
+};
-#define L2CAP_CONN_SREJ_ACT 0x01
-#define L2CAP_CONN_REJ_ACT 0x02
-#define L2CAP_CONN_REMOTE_BUSY 0x04
-#define L2CAP_CONN_LOCAL_BUSY 0x08
-#define L2CAP_CONN_SEND_FBIT 0x10
-#define L2CAP_CONN_SENT_RNR 0x20
+/* Definitions for flags in l2cap_chan */
+enum {
+ FLAG_ROLE_SWITCH,
+ FLAG_FORCE_ACTIVE,
+ FLAG_FORCE_RELIABLE,
+ FLAG_FLUSHABLE,
+ FLAG_EXT_CTRL,
+ FLAG_EFS_ENABLE,
+};
-#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
-#define L2CAP_SEQ_LIST_TAIL 0x8000
+enum {
+ L2CAP_TX_STATE_XMIT,
+ L2CAP_TX_STATE_WAIT_F,
+};
-#define L2CAP_ERTM_TX_STATE_XMIT 0x01
-#define L2CAP_ERTM_TX_STATE_WAIT_F 0x02
+enum {
+ L2CAP_RX_STATE_RECV,
+ L2CAP_RX_STATE_SREJ_SENT,
+ L2CAP_RX_STATE_MOVE,
+ L2CAP_RX_STATE_WAIT_P,
+ L2CAP_RX_STATE_WAIT_F,
+};
-#define L2CAP_ERTM_RX_STATE_RECV 0x01
-#define L2CAP_ERTM_RX_STATE_SREJ_SENT 0x02
-#define L2CAP_ERTM_RX_STATE_AMP_MOVE 0x03
-#define L2CAP_ERTM_RX_STATE_WAIT_P_FLAG 0x04
-#define L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE 0x05
-#define L2CAP_ERTM_RX_STATE_WAIT_F_FLAG 0x06
+enum {
+ L2CAP_TXSEQ_EXPECTED,
+ L2CAP_TXSEQ_EXPECTED_SREJ,
+ L2CAP_TXSEQ_UNEXPECTED,
+ L2CAP_TXSEQ_UNEXPECTED_SREJ,
+ L2CAP_TXSEQ_DUPLICATE,
+ L2CAP_TXSEQ_DUPLICATE_SREJ,
+ L2CAP_TXSEQ_INVALID,
+ L2CAP_TXSEQ_INVALID_IGNORE,
+};
-#define L2CAP_ERTM_TXSEQ_EXPECTED 0x00
-#define L2CAP_ERTM_TXSEQ_EXPECTED_SREJ 0x01
-#define L2CAP_ERTM_TXSEQ_UNEXPECTED 0x02
-#define L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ 0x03
-#define L2CAP_ERTM_TXSEQ_DUPLICATE 0x04
-#define L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ 0x05
-#define L2CAP_ERTM_TXSEQ_INVALID 0x06
-#define L2CAP_ERTM_TXSEQ_INVALID_IGNORE 0x07
+enum {
+ L2CAP_EV_DATA_REQUEST,
+ L2CAP_EV_LOCAL_BUSY_DETECTED,
+ L2CAP_EV_LOCAL_BUSY_CLEAR,
+ L2CAP_EV_RECV_REQSEQ_AND_FBIT,
+ L2CAP_EV_RECV_FBIT,
+ L2CAP_EV_RETRANS_TO,
+ L2CAP_EV_MONITOR_TO,
+ L2CAP_EV_EXPLICIT_POLL,
+ L2CAP_EV_RECV_IFRAME,
+ L2CAP_EV_RECV_RR,
+ L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR,
+ L2CAP_EV_RECV_SREJ,
+ L2CAP_EV_RECV_FRAME,
+};
-#define L2CAP_ERTM_EVENT_DATA_REQUEST 0x01
-#define L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED 0x02
-#define L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR 0x03
-#define L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT 0x04
-#define L2CAP_ERTM_EVENT_RECV_FBIT 0x05
-#define L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES 0x06
-#define L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES 0x07
-#define L2CAP_ERTM_EVENT_EXPLICIT_POLL 0x08
-#define L2CAP_ERTM_EVENT_RECV_IFRAME 0x09
-#define L2CAP_ERTM_EVENT_RECV_RR 0x0a
-#define L2CAP_ERTM_EVENT_RECV_REJ 0x0b
-#define L2CAP_ERTM_EVENT_RECV_RNR 0x0c
-#define L2CAP_ERTM_EVENT_RECV_SREJ 0x0d
-#define L2CAP_ERTM_EVENT_RECV_FRAME 0x0e
+enum {
+ L2CAP_MOVE_ROLE_NONE,
+ L2CAP_MOVE_ROLE_INITIATOR,
+ L2CAP_MOVE_ROLE_RESPONDER,
+};
-#define L2CAP_AMP_MOVE_NONE 0
-#define L2CAP_AMP_MOVE_INITIATOR 1
-#define L2CAP_AMP_MOVE_RESPONDER 2
+enum {
+ L2CAP_MOVE_STABLE,
+ L2CAP_MOVE_WAIT_REQ,
+ L2CAP_MOVE_WAIT_RSP,
+ L2CAP_MOVE_WAIT_RSP_SUCCESS,
+ L2CAP_MOVE_WAIT_CONFIRM,
+ L2CAP_MOVE_WAIT_CONFIRM_RSP,
+ L2CAP_MOVE_WAIT_LOGICAL_COMP,
+ L2CAP_MOVE_WAIT_LOGICAL_CFM,
+ L2CAP_MOVE_WAIT_LOCAL_BUSY,
+ L2CAP_MOVE_WAIT_PREPARE,
+};
-#define L2CAP_AMP_STATE_STABLE 0
-#define L2CAP_AMP_STATE_WAIT_CREATE 1
-#define L2CAP_AMP_STATE_WAIT_CREATE_RSP 2
-#define L2CAP_AMP_STATE_WAIT_MOVE 3
-#define L2CAP_AMP_STATE_WAIT_MOVE_RSP 4
-#define L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS 5
-#define L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM 6
-#define L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP 7
-#define L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE 8
-#define L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM 9
-#define L2CAP_AMP_STATE_WAIT_LOCAL_BUSY 10
-#define L2CAP_AMP_STATE_WAIT_PREPARE 11
-#define L2CAP_AMP_STATE_RESEGMENT 12
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
-#define L2CAP_ATT_ERROR 0x01
-#define L2CAP_ATT_MTU_REQ 0x02
-#define L2CAP_ATT_MTU_RSP 0x03
-#define L2CAP_ATT_RESPONSE_BIT 0x01
-#define L2CAP_ATT_INDICATE 0x1D
-#define L2CAP_ATT_CONFIRM 0x1E
-#define L2CAP_ATT_NOT_SUPPORTED 0x06
+static inline void l2cap_chan_lock(struct l2cap_chan *chan)
+{
+ mutex_lock(&chan->lock);
+}
-#define __delta_seq(x, y, pi) ((x) >= (y) ? (x) - (y) : \
- (pi)->tx_win_max + 1 - (y) + (x))
-#define __next_seq(x, pi) ((x + 1) & ((pi)->tx_win_max))
+static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
+{
+ mutex_unlock(&chan->lock);
+}
+
+static inline void l2cap_set_timer(struct l2cap_chan *chan,
+ struct delayed_work *work, long timeout)
+{
+ BT_DBG("chan %p state %s timeout %ld", chan,
+ state_to_string(chan->state), timeout);
+
+ /* If the delayed work was pending and got cancelled here, do not
+ take another hold(chan); the previous set_timer already did it */
+ if (!cancel_delayed_work(work))
+ l2cap_chan_hold(chan);
+
+ schedule_delayed_work(work, timeout);
+}
+
+static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
+ struct delayed_work *work)
+{
+ bool ret;
+
+ /* put(chan) only if the delayed work was cancelled here; otherwise
+ the delayed work function drops the reference itself */
+ ret = cancel_delayed_work(work);
+ if (ret)
+ l2cap_chan_put(chan);
+
+ return ret;
+}
+
+#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
+#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
+#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
+#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
+#define __set_ack_timer(c) l2cap_set_timer(c, &(c)->ack_timer, \
+ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO))
+#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
+
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+ if (seq1 >= seq2)
+ return seq1 - seq2;
+ else
+ return chan->tx_win_max + 1 - seq2 + seq1;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+ return (seq + 1) % (chan->tx_win_max + 1);
+}
+
+static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
+{
+ return NULL;
+}
+
+static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
+{
+}
+
+static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
+{
+}
extern bool disable_ertm;
-extern const struct proto_ops l2cap_sock_ops;
-extern struct bt_sock_list l2cap_sk_list;
int l2cap_init_sockets(void);
void l2cap_cleanup_sockets(void);
+bool l2cap_is_socket(struct socket *sock);
-u8 l2cap_get_ident(struct l2cap_conn *conn);
-void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
-int l2cap_build_conf_req(struct sock *sk, void *data);
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
int __l2cap_wait_ack(struct sock *sk);
-struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len);
-struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len);
-struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg,
- size_t len, u16 sdulen, int reseg);
-int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
- struct msghdr *msg, size_t len, int reseg);
-int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue);
-void l2cap_do_send(struct sock *sk, struct sk_buff *skb);
-void l2cap_streaming_send(struct sock *sk);
-int l2cap_ertm_send(struct sock *sk);
-int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs);
-int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
- struct sk_buff_head *skbs, u8 event);
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
-int l2cap_sock_le_params_valid(struct bt_le_params *le_params);
-void l2cap_sock_set_timer(struct sock *sk, long timeout);
-void l2cap_sock_clear_timer(struct sock *sk);
-void __l2cap_sock_close(struct sock *sk, int reason);
-void l2cap_sock_kill(struct sock *sk);
-void l2cap_sock_init(struct sock *sk, struct sock *parent);
-struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
- int proto, gfp_t prio);
-struct sock *l2cap_find_sock_by_fixed_cid_and_dir(__le16 cid, bdaddr_t *src,
- bdaddr_t *dst, int server);
-void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err);
-void l2cap_chan_del(struct sock *sk, int err);
-int l2cap_do_connect(struct sock *sk);
-int l2cap_data_channel(struct sock *sk, struct sk_buff *skb);
-void l2cap_amp_move_init(struct sock *sk);
-void l2cap_ertm_destruct(struct sock *sk);
-void l2cap_ertm_shutdown(struct sock *sk);
-void l2cap_ertm_recv_done(struct sock *sk);
+struct l2cap_chan *l2cap_chan_create(void);
+void l2cap_chan_close(struct l2cap_chan *chan, int reason);
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority);
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan);
+void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+int l2cap_ertm_init(struct l2cap_chan *chan);
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_send_conn_req(struct l2cap_chan *chan);
+void l2cap_move_start(struct l2cap_chan *chan);
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+ u8 status);
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
-void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt);
+void l2cap_conn_get(struct l2cap_conn *conn);
+void l2cap_conn_put(struct l2cap_conn *conn);
-void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb);
-
-void l2cap_amp_physical_complete(int result, u8 remote_id, u8 local_id,
- struct sock *sk);
-
-void l2cap_amp_logical_complete(int result, struct hci_conn *ampcon,
- struct hci_chan *ampchan, struct sock *sk);
-
-void l2cap_amp_logical_destroyed(struct hci_conn *ampcon);
-
-void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process);
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
+void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
#endif /* __L2CAP_H */
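__seq_offset() and __next_seq() above implement modular arithmetic over a window of tx_win_max + 1 sequence numbers. A hypothetical userspace mirror (illustration only, assuming the 6-bit enhanced-control window, i.e. tx_win_max == 63) shows the wrap-around:

#include <stdio.h>

static unsigned int next_seq(unsigned int seq, unsigned int tx_win_max)
{
	return (seq + 1) % (tx_win_max + 1);
}

static unsigned int seq_offset(unsigned int seq1, unsigned int seq2,
			       unsigned int tx_win_max)
{
	return seq1 >= seq2 ? seq1 - seq2
			    : tx_win_max + 1 - seq2 + seq1;
}

int main(void)
{
	/* 63 wraps to 0; the distance from 62 forward to 2 is 4 frames */
	printf("%u %u\n", next_seq(63, 63), seq_offset(2, 62, 63));
	return 0;
}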
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 3bf514f..9944c3e 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -2,6 +2,7 @@
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -23,399 +24,465 @@
#define MGMT_INDEX_NONE 0xFFFF
+#define MGMT_STATUS_SUCCESS 0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND 0x01
+#define MGMT_STATUS_NOT_CONNECTED 0x02
+#define MGMT_STATUS_FAILED 0x03
+#define MGMT_STATUS_CONNECT_FAILED 0x04
+#define MGMT_STATUS_AUTH_FAILED 0x05
+#define MGMT_STATUS_NOT_PAIRED 0x06
+#define MGMT_STATUS_NO_RESOURCES 0x07
+#define MGMT_STATUS_TIMEOUT 0x08
+#define MGMT_STATUS_ALREADY_CONNECTED 0x09
+#define MGMT_STATUS_BUSY 0x0a
+#define MGMT_STATUS_REJECTED 0x0b
+#define MGMT_STATUS_NOT_SUPPORTED 0x0c
+#define MGMT_STATUS_INVALID_PARAMS 0x0d
+#define MGMT_STATUS_DISCONNECTED 0x0e
+#define MGMT_STATUS_NOT_POWERED 0x0f
+#define MGMT_STATUS_CANCELLED 0x10
+#define MGMT_STATUS_INVALID_INDEX 0x11
+#define MGMT_STATUS_RFKILLED 0x12
+
struct mgmt_hdr {
- __le16 opcode;
- __le16 index;
- __le16 len;
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
} __packed;
+struct mgmt_addr_info {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+#define MGMT_ADDR_INFO_SIZE 7
+
#define MGMT_OP_READ_VERSION 0x0001
+#define MGMT_READ_VERSION_SIZE 0
struct mgmt_rp_read_version {
- __u8 version;
- __le16 revision;
+ __u8 version;
+ __le16 revision;
+} __packed;
+
+#define MGMT_OP_READ_COMMANDS 0x0002
+#define MGMT_READ_COMMANDS_SIZE 0
+struct mgmt_rp_read_commands {
+ __le16 num_commands;
+ __le16 num_events;
+ __le16 opcodes[0];
} __packed;
#define MGMT_OP_READ_INDEX_LIST 0x0003
+#define MGMT_READ_INDEX_LIST_SIZE 0
struct mgmt_rp_read_index_list {
- __le16 num_controllers;
- __le16 index[0];
+ __le16 num_controllers;
+ __le16 index[0];
} __packed;
/* Reserve one extra byte for names in management messages so that they
* are always guaranteed to be nul-terminated */
#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1)
+
+#define MGMT_SETTING_POWERED 0x00000001
+#define MGMT_SETTING_CONNECTABLE 0x00000002
+#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
+#define MGMT_SETTING_DISCOVERABLE 0x00000008
+#define MGMT_SETTING_PAIRABLE 0x00000010
+#define MGMT_SETTING_LINK_SECURITY 0x00000020
+#define MGMT_SETTING_SSP 0x00000040
+#define MGMT_SETTING_BREDR 0x00000080
+#define MGMT_SETTING_HS 0x00000100
+#define MGMT_SETTING_LE 0x00000200
#define MGMT_OP_READ_INFO 0x0004
+#define MGMT_READ_INFO_SIZE 0
struct mgmt_rp_read_info {
- __u8 type;
- __u8 powered;
- __u8 connectable;
- __u8 discoverable;
- __u8 pairable;
- __u8 sec_mode;
- bdaddr_t bdaddr;
- __u8 dev_class[3];
- __u8 features[8];
- __u16 manufacturer;
- __u8 hci_ver;
- __u16 hci_rev;
- __u8 name[MGMT_MAX_NAME_LENGTH];
- __u8 le_white_list_size;
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __u8 dev_class[3];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
} __packed;
struct mgmt_mode {
__u8 val;
} __packed;
+#define MGMT_SETTING_SIZE 1
+
#define MGMT_OP_SET_POWERED 0x0005
#define MGMT_OP_SET_DISCOVERABLE 0x0006
+struct mgmt_cp_set_discoverable {
+ __u8 val;
+ __le16 timeout;
+} __packed;
+#define MGMT_SET_DISCOVERABLE_SIZE 3
#define MGMT_OP_SET_CONNECTABLE 0x0007
-#define MGMT_OP_SET_PAIRABLE 0x0008
+#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
-#define MGMT_OP_ADD_UUID 0x0009
-struct mgmt_cp_add_uuid {
- __u8 uuid[16];
- __u8 svc_hint;
-} __packed;
+#define MGMT_OP_SET_PAIRABLE 0x0009
-#define MGMT_OP_REMOVE_UUID 0x000A
-struct mgmt_cp_remove_uuid {
- __u8 uuid[16];
-} __packed;
+#define MGMT_OP_SET_LINK_SECURITY 0x000A
-#define MGMT_OP_SET_DEV_CLASS 0x000B
+#define MGMT_OP_SET_SSP 0x000B
+
+#define MGMT_OP_SET_HS 0x000C
+
+#define MGMT_OP_SET_LE 0x000D
+#define MGMT_OP_SET_DEV_CLASS 0x000E
struct mgmt_cp_set_dev_class {
- __u8 major;
- __u8 minor;
+ __u8 major;
+ __u8 minor;
} __packed;
-#define MGMT_MAJOR_CLASS_MASK 0x1F
-#define MGMT_MAJOR_CLASS_LIMITED 0x20
+#define MGMT_SET_DEV_CLASS_SIZE 2
-#define MGMT_OP_SET_SERVICE_CACHE 0x000C
-struct mgmt_cp_set_service_cache {
- __u8 enable;
+#define MGMT_OP_SET_LOCAL_NAME 0x000F
+struct mgmt_cp_set_local_name {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+#define MGMT_SET_LOCAL_NAME_SIZE 260
+
+#define MGMT_OP_ADD_UUID 0x0010
+struct mgmt_cp_add_uuid {
+ __u8 uuid[16];
+ __u8 svc_hint;
+} __packed;
+#define MGMT_ADD_UUID_SIZE 17
+
+#define MGMT_OP_REMOVE_UUID 0x0011
+struct mgmt_cp_remove_uuid {
+ __u8 uuid[16];
+} __packed;
+#define MGMT_REMOVE_UUID_SIZE 16
+
+struct mgmt_link_key_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+ __u8 pin_len;
} __packed;
-struct mgmt_key_info {
- bdaddr_t bdaddr;
- u8 addr_type;
- u8 key_type;
- u8 val[16];
- u8 pin_len;
- u8 auth;
- u8 dlen;
- u8 data[10];
+#define MGMT_OP_LOAD_LINK_KEYS 0x0012
+struct mgmt_cp_load_link_keys {
+ __u8 debug_keys;
+ __le16 key_count;
+ struct mgmt_link_key_info keys[0];
+} __packed;
+#define MGMT_LOAD_LINK_KEYS_SIZE 3
+
+struct mgmt_ltk_info {
+ struct mgmt_addr_info addr;
+ __u8 authenticated;
+ __u8 master;
+ __u8 enc_size;
+ __le16 ediv;
+ __u8 rand[8];
+ __u8 val[16];
} __packed;
-#define MGMT_OP_LOAD_KEYS 0x000D
-struct mgmt_cp_load_keys {
- __u8 debug_keys;
- __le16 key_count;
- struct mgmt_key_info keys[0];
+#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013
+struct mgmt_cp_load_long_term_keys {
+ __le16 key_count;
+ struct mgmt_ltk_info keys[0];
} __packed;
+#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2
-#define MGMT_OP_REMOVE_KEY 0x000E
-struct mgmt_cp_remove_key {
- bdaddr_t bdaddr;
- __u8 disconnect;
-} __packed;
-
-#define MGMT_OP_DISCONNECT 0x000F
+#define MGMT_OP_DISCONNECT 0x0014
struct mgmt_cp_disconnect {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_DISCONNECT_SIZE MGMT_ADDR_INFO_SIZE
struct mgmt_rp_disconnect {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_GET_CONNECTIONS 0x0010
+#define MGMT_OP_GET_CONNECTIONS 0x0015
+#define MGMT_GET_CONNECTIONS_SIZE 0
struct mgmt_rp_get_connections {
__le16 conn_count;
- bdaddr_t conn[0];
+ struct mgmt_addr_info addr[0];
} __packed;
-#define MGMT_OP_PIN_CODE_REPLY 0x0011
+#define MGMT_OP_PIN_CODE_REPLY 0x0016
struct mgmt_cp_pin_code_reply {
- bdaddr_t bdaddr;
- __u8 pin_len;
- __u8 pin_code[16];
+ struct mgmt_addr_info addr;
+ __u8 pin_len;
+ __u8 pin_code[16];
} __packed;
+#define MGMT_PIN_CODE_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 17)
struct mgmt_rp_pin_code_reply {
- bdaddr_t bdaddr;
- uint8_t status;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012
+#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
struct mgmt_cp_pin_code_neg_reply {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_PIN_CODE_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_SET_IO_CAPABILITY 0x0013
+#define MGMT_OP_SET_IO_CAPABILITY 0x0018
struct mgmt_cp_set_io_capability {
- __u8 io_capability;
+ __u8 io_capability;
} __packed;
+#define MGMT_SET_IO_CAPABILITY_SIZE 1
-#define MGMT_OP_PAIR_DEVICE 0x0014
+#define MGMT_OP_PAIR_DEVICE 0x0019
struct mgmt_cp_pair_device {
- bdaddr_t bdaddr;
- __u8 io_cap;
+ struct mgmt_addr_info addr;
+ __u8 io_cap;
} __packed;
+#define MGMT_PAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
struct mgmt_rp_pair_device {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_USER_CONFIRM_REPLY 0x0015
+#define MGMT_OP_CANCEL_PAIR_DEVICE 0x001A
+#define MGMT_CANCEL_PAIR_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNPAIR_DEVICE 0x001B
+struct mgmt_cp_unpair_device {
+ struct mgmt_addr_info addr;
+ __u8 disconnect;
+} __packed;
+#define MGMT_UNPAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_unpair_device {
+ struct mgmt_addr_info addr;
+};
+
+#define MGMT_OP_USER_CONFIRM_REPLY 0x001C
struct mgmt_cp_user_confirm_reply {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_USER_CONFIRM_REPLY_SIZE MGMT_ADDR_INFO_SIZE
struct mgmt_rp_user_confirm_reply {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016
-
-#define MGMT_OP_SET_LOCAL_NAME 0x0017
-struct mgmt_cp_set_local_name {
- __u8 name[MGMT_MAX_NAME_LENGTH];
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001D
+struct mgmt_cp_user_confirm_neg_reply {
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_USER_CONFIRM_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0018
-struct mgmt_rp_read_local_oob_data {
- __u8 hash[16];
- __u8 randomizer[16];
-} __packed;
-
-#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0019
-struct mgmt_cp_add_remote_oob_data {
- bdaddr_t bdaddr;
- __u8 hash[16];
- __u8 randomizer[16];
-} __packed;
-
-#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x001A
-struct mgmt_cp_remove_remote_oob_data {
- bdaddr_t bdaddr;
-} __packed;
-
-#define MGMT_OP_START_DISCOVERY 0x001B
-
-#define MGMT_OP_STOP_DISCOVERY 0x001C
-
-#define MGMT_OP_USER_PASSKEY_REPLY 0x001D
+#define MGMT_OP_USER_PASSKEY_REPLY 0x001E
struct mgmt_cp_user_passkey_reply {
- bdaddr_t bdaddr;
- __le32 passkey;
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+} __packed;
+#define MGMT_USER_PASSKEY_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 4)
+struct mgmt_rp_user_passkey_reply {
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_RESOLVE_NAME 0x001E
-struct mgmt_cp_resolve_name {
- bdaddr_t bdaddr;
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001F
+struct mgmt_cp_user_passkey_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_PASSKEY_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
+#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
+struct mgmt_rp_read_local_oob_data {
+ __u8 hash[16];
+ __u8 randomizer[16];
} __packed;
-#define MGMT_OP_SET_LIMIT_DISCOVERABLE 0x001F
+#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
+struct mgmt_cp_add_remote_oob_data {
+ struct mgmt_addr_info addr;
+ __u8 hash[16];
+ __u8 randomizer[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
-#define MGMT_OP_SET_CONNECTION_PARAMS 0x0020
-struct mgmt_cp_set_connection_params {
- bdaddr_t bdaddr;
- __le16 interval_min;
- __le16 interval_max;
- __le16 slave_latency;
- __le16 timeout_multiplier;
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022
+struct mgmt_cp_remove_remote_oob_data {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_REMOTE_OOB_DATA_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_START_DISCOVERY 0x0023
+struct mgmt_cp_start_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_START_DISCOVERY_SIZE 1
+
+#define MGMT_OP_STOP_DISCOVERY 0x0024
+struct mgmt_cp_stop_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_STOP_DISCOVERY_SIZE 1
+
+#define MGMT_OP_CONFIRM_NAME 0x0025
+struct mgmt_cp_confirm_name {
+ struct mgmt_addr_info addr;
+ __u8 name_known;
+} __packed;
+#define MGMT_CONFIRM_NAME_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_confirm_name {
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_OP_ENCRYPT_LINK 0x0021
-struct mgmt_cp_encrypt_link {
- bdaddr_t bdaddr;
- __u8 enable;
+#define MGMT_OP_BLOCK_DEVICE 0x0026
+struct mgmt_cp_block_device {
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_BLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_SET_RSSI_REPORTER 0x0022
-struct mgmt_cp_set_rssi_reporter {
- bdaddr_t bdaddr;
- __s8 rssi_threshold;
- __le16 interval;
- __u8 updateOnThreshExceed;
+#define MGMT_OP_UNBLOCK_DEVICE 0x0027
+struct mgmt_cp_unblock_device {
+ struct mgmt_addr_info addr;
} __packed;
+#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
-#define MGMT_OP_UNSET_RSSI_REPORTER 0x0023
-struct mgmt_cp_unset_rssi_reporter {
- bdaddr_t bdaddr;
+#define MGMT_OP_SET_DEVICE_ID 0x0028
+struct mgmt_cp_set_device_id {
+ __le16 source;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
} __packed;
-
-#define MGMT_OP_CANCEL_RESOLVE_NAME 0x0024
-struct mgmt_cp_cancel_resolve_name {
- bdaddr_t bdaddr;
-} __packed;
-
-#define MGMT_OP_LE_READ_WHITE_LIST_SIZE 0xE000
-
-#define MGMT_OP_LE_CLEAR_WHITE_LIST 0xE001
-
-#define MGMT_OP_LE_ADD_DEV_WHITE_LIST 0xE002
-struct mgmt_cp_le_add_dev_white_list {
- __u8 addr_type;
- bdaddr_t bdaddr;
-} __packed;
-
-#define MGMT_OP_LE_REMOVE_DEV_WHITE_LIST 0xE003
-struct mgmt_cp_le_remove_dev_white_list {
- __u8 addr_type;
- bdaddr_t bdaddr;
-} __packed;
-
-#define MGMT_OP_LE_CREATE_CONN_WHITE_LIST 0xE004
-
-#define MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST 0xE005
-
-#define MGMT_OP_LE_CANCEL_CREATE_CONN 0xE006
-struct mgmt_cp_le_cancel_create_conn {
- bdaddr_t bdaddr;
-} __packed;
+#define MGMT_SET_DEVICE_ID_SIZE 8
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
- __le16 opcode;
- __u8 data[0];
+ __le16 opcode;
+ __u8 status;
+ __u8 data[0];
} __packed;
#define MGMT_EV_CMD_STATUS 0x0002
struct mgmt_ev_cmd_status {
- __u8 status;
- __le16 opcode;
+ __le16 opcode;
+ __u8 status;
} __packed;
#define MGMT_EV_CONTROLLER_ERROR 0x0003
struct mgmt_ev_controller_error {
- __u8 error_code;
+ __u8 error_code;
} __packed;
#define MGMT_EV_INDEX_ADDED 0x0004
#define MGMT_EV_INDEX_REMOVED 0x0005
-#define MGMT_EV_POWERED 0x0006
+#define MGMT_EV_NEW_SETTINGS 0x0006
-#define MGMT_EV_DISCOVERABLE 0x0007
+#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
+struct mgmt_ev_class_of_dev_changed {
+ __u8 dev_class[3];
+};
-#define MGMT_EV_CONNECTABLE 0x0008
-
-#define MGMT_EV_PAIRABLE 0x0009
-
-#define MGMT_EV_NEW_KEY 0x000A
-struct mgmt_ev_new_key {
- __u8 store_hint;
- struct mgmt_key_info key;
+#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
+struct mgmt_ev_local_name_changed {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
} __packed;
-#define MGMT_EV_CONNECTED 0x000B
-struct mgmt_ev_connected {
- bdaddr_t bdaddr;
- __u8 le;
+#define MGMT_EV_NEW_LINK_KEY 0x0009
+struct mgmt_ev_new_link_key {
+ __u8 store_hint;
+ struct mgmt_link_key_info key;
} __packed;
-#define MGMT_EV_DISCONNECTED 0x000C
-struct mgmt_ev_disconnected {
- bdaddr_t bdaddr;
- __u8 reason;
+#define MGMT_EV_NEW_LONG_TERM_KEY 0x000A
+struct mgmt_ev_new_long_term_key {
+ __u8 store_hint;
+ struct mgmt_ltk_info key;
+} __packed;
+
+#define MGMT_EV_DEVICE_CONNECTED 0x000B
+struct mgmt_ev_device_connected {
+ struct mgmt_addr_info addr;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_DEV_DISCONN_UNKNOWN 0x00
+#define MGMT_DEV_DISCONN_TIMEOUT 0x01
+#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
+#define MGMT_DEV_DISCONN_REMOTE 0x03
+
+#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
+struct mgmt_ev_device_disconnected {
+ struct mgmt_addr_info addr;
+ __u8 reason;
} __packed;
#define MGMT_EV_CONNECT_FAILED 0x000D
struct mgmt_ev_connect_failed {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
+ __u8 status;
} __packed;
#define MGMT_EV_PIN_CODE_REQUEST 0x000E
struct mgmt_ev_pin_code_request {
- bdaddr_t bdaddr;
- __u8 secure;
+ struct mgmt_addr_info addr;
+ __u8 secure;
} __packed;
#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
struct mgmt_ev_user_confirm_request {
- bdaddr_t bdaddr;
- __u8 auto_confirm;
- __u8 event;
- __le32 value;
+ struct mgmt_addr_info addr;
+ __u8 confirm_hint;
+ __le32 value;
} __packed;
-#define MGMT_EV_AUTH_FAILED 0x0010
+#define MGMT_EV_USER_PASSKEY_REQUEST 0x0010
+struct mgmt_ev_user_passkey_request {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_AUTH_FAILED 0x0011
struct mgmt_ev_auth_failed {
- bdaddr_t bdaddr;
- __u8 status;
+ struct mgmt_addr_info addr;
+ __u8 status;
} __packed;
-#define MGMT_EV_LOCAL_NAME_CHANGED 0x0011
-struct mgmt_ev_local_name_changed {
- __u8 name[MGMT_MAX_NAME_LENGTH];
-} __packed;
+#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
+#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
- bdaddr_t bdaddr;
- __u8 dev_class[3];
- __s8 rssi;
- __u8 le;
- __u8 type;
- __u8 eir[HCI_MAX_EIR_LENGTH];
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
} __packed;
-#define MGMT_EV_REMOTE_NAME 0x0013
-struct mgmt_ev_remote_name {
- bdaddr_t bdaddr;
- __u8 status;
- __u8 name[MGMT_MAX_NAME_LENGTH];
+#define MGMT_EV_DISCOVERING 0x0013
+struct mgmt_ev_discovering {
+ __u8 type;
+ __u8 discovering;
} __packed;
-#define MGMT_EV_DISCOVERING 0x0014
-
-#define MGMT_EV_USER_PASSKEY_REQUEST 0x0015
-struct mgmt_ev_user_passkey_request {
- bdaddr_t bdaddr;
+#define MGMT_EV_DEVICE_BLOCKED 0x0014
+struct mgmt_ev_device_blocked {
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_EV_ENCRYPT_CHANGE 0x0016
-struct mgmt_ev_encrypt_change {
- bdaddr_t bdaddr;
- __u8 status;
+#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
+struct mgmt_ev_device_unblocked {
+ struct mgmt_addr_info addr;
} __packed;
-
-#define MGMT_EV_REMOTE_CLASS 0x0017
-struct mgmt_ev_remote_class {
- bdaddr_t bdaddr;
- __u8 dev_class[3];
+#define MGMT_EV_DEVICE_UNPAIRED 0x0016
+struct mgmt_ev_device_unpaired {
+ struct mgmt_addr_info addr;
} __packed;
-#define MGMT_EV_REMOTE_VERSION 0x0018
-struct mgmt_ev_remote_version {
- bdaddr_t bdaddr;
- __u8 lmp_ver;
- __u16 manufacturer;
- __u16 lmp_subver;
-} __packed;
-
-#define MGMT_EV_REMOTE_FEATURES 0x0019
-struct mgmt_ev_remote_features {
- bdaddr_t bdaddr;
- uint8_t features[8];
-} __packed;
-
-#define MGMT_EV_RSSI_UPDATE 0x0020
-struct mgmt_ev_rssi_update {
- bdaddr_t bdaddr;
- __s8 rssi;
-} __packed;
-
-#define MGMT_EV_LE_CONN_PARAMS 0xF000
-struct mgmt_ev_le_conn_params {
- bdaddr_t bdaddr;
- __u16 interval;
- __u16 latency;
- __u16 timeout;
+#define MGMT_EV_PASSKEY_NOTIFY 0x0017
+struct mgmt_ev_passkey_notify {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+ __u8 entered;
} __packed;
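Every management command uses the same framing as the events above: a 6-byte little-endian mgmt_hdr (opcode, controller index, parameter length) followed by the parameters. A hypothetical userspace sketch (not part of the patch) of serializing MGMT_OP_SET_DISCOVERABLE with its 3-byte mgmt_cp_set_discoverable payload:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Returns the number of bytes written; buf must hold at least 9 bytes.
 * The opcode (0x0006) and payload size (3) come from the definitions
 * above. */
static size_t build_set_discoverable(uint8_t *buf, uint16_t index,
				     uint8_t on, uint16_t timeout)
{
	uint16_t hdr[3] = {
		htole16(0x0006),	/* MGMT_OP_SET_DISCOVERABLE */
		htole16(index),		/* controller index */
		htole16(3),		/* MGMT_SET_DISCOVERABLE_SIZE */
	};
	uint16_t to = htole16(timeout);

	memcpy(buf, hdr, sizeof(hdr));		/* struct mgmt_hdr */
	buf[6] = on;				/* mgmt_cp_set_discoverable.val */
	memcpy(buf + 7, &to, sizeof(to));	/* .timeout */

	return 9;
}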
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 6eac4a7..7afd419 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -158,7 +158,6 @@
struct timer_list timer;
unsigned long state;
unsigned long flags;
- atomic_t refcnt;
int initiator;
/* Default DLC parameters */
@@ -211,6 +210,7 @@
#define RFCOMM_AUTH_ACCEPT 6
#define RFCOMM_AUTH_REJECT 7
#define RFCOMM_DEFER_SETUP 8
+#define RFCOMM_ENC_DROP 9
/* Scheduling flags and events */
#define RFCOMM_SCHED_WAKEUP 31
@@ -234,7 +234,8 @@
/* ---- RFCOMM DLCs (channels) ---- */
struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
void rfcomm_dlc_free(struct rfcomm_dlc *d);
-int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
+int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
+ u8 channel);
int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
@@ -271,12 +272,8 @@
}
/* ---- RFCOMM sessions ---- */
-void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst);
-
-static inline void rfcomm_session_hold(struct rfcomm_session *s)
-{
- atomic_inc(&s->refcnt);
-}
+void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src,
+ bdaddr_t *dst);
/* ---- RFCOMM sockets ---- */
struct sockaddr_rc {
@@ -312,7 +309,8 @@
int rfcomm_init_sockets(void);
void rfcomm_cleanup_sockets(void);
-int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d);
+int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel,
+ struct rfcomm_dlc **d);
/* ---- RFCOMM TTY ---- */
#define RFCOMM_MAX_DEV 256
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 8c85c98..6d1857a 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -1,7 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright (c) 2011, The Linux Foundation. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -31,7 +30,7 @@
#define SCO_DEFAULT_FLUSH_TO 0xFFFF
#define SCO_CONN_TIMEOUT (HZ * 40)
-#define SCO_DISCONN_TIMEOUT (HZ * 20)
+#define SCO_DISCONN_TIMEOUT (HZ * 2)
#define SCO_CONN_IDLE_TIMEOUT (HZ * 60)
/* SCO socket address */
@@ -39,7 +38,6 @@
sa_family_t sco_family;
bdaddr_t sco_bdaddr;
__u16 sco_pkt_type;
- __s8 is_wbs;
};
/* SCO socket options */
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index f0f2842..f8ba07f 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -55,13 +55,6 @@
#define SMP_AUTH_BONDING 0x01
#define SMP_AUTH_MITM 0x04
-#define SMP_JUST_WORKS 0x00
-#define SMP_JUST_CFM 0x01
-#define SMP_REQ_PASSKEY 0x02
-#define SMP_CFM_PASSKEY 0x03
-#define SMP_REQ_OOB 0x04
-#define SMP_OVERLAP 0xFF
-
#define SMP_CMD_PAIRING_CONFIRM 0x03
struct smp_cmd_pairing_confirm {
__u8 confirm_val[16];
@@ -84,7 +77,7 @@
#define SMP_CMD_MASTER_IDENT 0x07
struct smp_cmd_master_ident {
- __u16 ediv;
+ __le16 ediv;
__u8 rand[8];
} __packed;
@@ -115,18 +108,39 @@
#define SMP_CONFIRM_FAILED 0x04
#define SMP_PAIRING_NOTSUPP 0x05
#define SMP_ENC_KEY_SIZE 0x06
-#define SMP_CMD_NOTSUPP 0x07
-#define SMP_UNSPECIFIED 0x08
+#define SMP_CMD_NOTSUPP 0x07
+#define SMP_UNSPECIFIED 0x08
#define SMP_REPEATED_ATTEMPTS 0x09
#define SMP_MIN_ENC_KEY_SIZE 7
#define SMP_MAX_ENC_KEY_SIZE 16
+#define SMP_FLAG_TK_VALID 1
+#define SMP_FLAG_CFM_PENDING 2
+#define SMP_FLAG_MITM_AUTH 3
+
+struct smp_chan {
+ struct l2cap_conn *conn;
+ u8 preq[7]; /* SMP Pairing Request */
+ u8 prsp[7]; /* SMP Pairing Response */
+ u8 prnd[16]; /* SMP Pairing Random (local) */
+ u8 rrnd[16]; /* SMP Pairing Random (remote) */
+ u8 pcnf[16]; /* SMP Pairing Confirm */
+ u8 tk[16]; /* SMP Temporary Key */
+ u8 enc_key_size;
+ unsigned long smp_flags;
+ struct crypto_blkcipher *tfm;
+ struct work_struct confirm;
+ struct work_struct random;
+
+};
+
/* SMP Commands */
-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
-int smp_link_encrypt_cmplt(struct l2cap_conn *conn, __u8 status, __u8 encrypt);
-void smp_conn_security_fail(struct l2cap_conn *conn, __u8 code, __u8 reason);
-void smp_timeout(unsigned long l2cap_conn);
+int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
+
+void smp_chan_destroy(struct l2cap_conn *conn);
#endif /* __SMP_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index f673ba5..8e986f0 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -624,6 +624,7 @@
* Will use last 4 bytes of packet sent from
* user-space instead.
*/
+ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index fc4543a..d3f3f7b 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -11,7 +11,6 @@
select CRYPTO_BLKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
- select CRYPTO_HMAC
select CRYPTO_SHA256
help
Bluetooth is low-cost, low-power, short-range wireless technology.
@@ -49,4 +48,3 @@
source "net/bluetooth/hidp/Kconfig"
source "drivers/bluetooth/Kconfig"
-
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 828be1d..dea6a28 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
- amp.o
+ a2mp.o amp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 0000000..17f33a6
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,1003 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
+
+/* Global AMP Manager list */
+LIST_HEAD(amp_mgr_list);
+DEFINE_MUTEX(amp_mgr_list_lock);
+
+/* A2MP build & send command helper functions */
+static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
+{
+ struct a2mp_cmd *cmd;
+ int plen;
+
+ plen = sizeof(*cmd) + len;
+ cmd = kzalloc(plen, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->code = code;
+ cmd->ident = ident;
+ cmd->len = cpu_to_le16(len);
+
+ memcpy(cmd->data, data, len);
+
+ return cmd;
+}
+
+void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
+{
+ struct l2cap_chan *chan = mgr->a2mp_chan;
+ struct a2mp_cmd *cmd;
+ u16 total_len = len + sizeof(*cmd);
+ struct kvec iv;
+ struct msghdr msg;
+
+ cmd = __a2mp_build(code, ident, len, data);
+ if (!cmd)
+ return;
+
+ iv.iov_base = cmd;
+ iv.iov_len = total_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.msg_iov = (struct iovec *) &iv;
+ msg.msg_iovlen = 1;
+
+ l2cap_chan_send(chan, &msg, total_len, 0);
+
+ kfree(cmd);
+}
+
+u8 __next_ident(struct amp_mgr *mgr)
+{
+ if (++mgr->ident == 0)
+ mgr->ident = 1;
+
+ return mgr->ident;
+}
+
+static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
+{
+ cl->id = 0;
+ cl->type = 0;
+ cl->status = 1;
+}
+
+/* hci_dev_list shall be locked */
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+{
+ int i = 0;
+ struct hci_dev *hdev;
+
+ __a2mp_cl_bredr(cl);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ /* Iterate through AMP controllers */
+ if (hdev->id == HCI_BREDR_ID)
+ continue;
+
+ /* Starting from second entry */
+ if (++i >= num_ctrl)
+ return;
+
+ cl[i].id = hdev->id;
+ cl[i].type = hdev->amp_type;
+ cl[i].status = hdev->amp_status;
+ }
+}
+
+/* Processing A2MP messages */
+static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cmd_rej *rej = (void *) skb->data;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*rej))
+ return -EINVAL;
+
+ BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
+
+ skb_pull(skb, sizeof(*rej));
+
+ return 0;
+}
+
+static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_discov_req *req = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct a2mp_discov_rsp *rsp;
+ u16 ext_feat;
+ u8 num_ctrl;
+
+ if (len < sizeof(*req))
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(*req));
+
+ ext_feat = le16_to_cpu(req->ext_feat);
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
+
+ /* For now, just check that the packet is not malformed */
+ while (ext_feat & A2MP_FEAT_EXT) {
+ if (len < sizeof(ext_feat))
+ return -EINVAL;
+
+ ext_feat = get_unaligned_le16(skb->data);
+ BT_DBG("efm 0x%4.4x", ext_feat);
+ len -= sizeof(ext_feat);
+ skb_pull(skb, sizeof(ext_feat));
+ }
+
+ read_lock(&hci_dev_list_lock);
+
+ num_ctrl = __hci_num_ctrl();
+ len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
+ rsp = kmalloc(len, GFP_ATOMIC);
+ if (!rsp) {
+ read_unlock(&hci_dev_list_lock);
+ return -ENOMEM;
+ }
+
+ rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ rsp->ext_feat = 0;
+
+ __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+
+ read_unlock(&hci_dev_list_lock);
+
+ a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
+
+ kfree(rsp);
+ return 0;
+}
+
+static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_discov_rsp *rsp = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct a2mp_cl *cl;
+ u16 ext_feat;
+ bool found = false;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ len -= sizeof(*rsp);
+ skb_pull(skb, sizeof(*rsp));
+
+ ext_feat = le16_to_cpu(rsp->ext_feat);
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
+
+ /* For now, just check that the packet is not malformed */
+ while (ext_feat & A2MP_FEAT_EXT) {
+ if (len < sizeof(ext_feat))
+ return -EINVAL;
+
+ ext_feat = get_unaligned_le16(skb->data);
+ BT_DBG("efm 0x%4.4x", ext_feat);
+ len -= sizeof(ext_feat);
+ skb_pull(skb, sizeof(ext_feat));
+ }
+
+ cl = (void *) skb->data;
+ while (len >= sizeof(*cl)) {
+ BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
+ cl->status);
+
+ if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) {
+ struct a2mp_info_req req;
+
+ found = true;
+ req.id = cl->id;
+ a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
+ sizeof(req), &req);
+ }
+
+ len -= sizeof(*cl);
+ cl = (void *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* Fall back to L2CAP init sequence */
+ if (!found) {
+ struct l2cap_conn *conn = mgr->l2cap_conn;
+ struct l2cap_chan *chan;
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+
+ BT_DBG("chan %p state %s", chan,
+ state_to_string(chan->state));
+
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP)
+ continue;
+
+ l2cap_chan_lock(chan);
+
+ if (chan->state == BT_CONNECT)
+ l2cap_send_conn_req(chan);
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+ }
+
+ return 0;
+}
+
+static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cl *cl = (void *) skb->data;
+
+ while (skb->len >= sizeof(*cl)) {
+ BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
+ cl->status);
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* TODO send A2MP_CHANGE_RSP */
+
+ return 0;
+}
+
+static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_info_req *req = (void *) skb->data;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->dev_type != HCI_AMP) {
+ struct a2mp_info_rsp rsp;
+
+ rsp.id = req->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+
+ goto done;
+ }
+
+ set_bit(READ_LOC_AMP_INFO, &mgr->state);
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+
+done:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
+ struct a2mp_amp_assoc_req req;
+ struct amp_ctrl *ctrl;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*rsp))
+ return -EINVAL;
+
+ BT_DBG("id %d status 0x%2.2x", rsp->id, rsp->status);
+
+ if (rsp->status)
+ return -EINVAL;
+
+ ctrl = amp_ctrl_add(mgr, rsp->id);
+ if (!ctrl)
+ return -ENOMEM;
+
+ req.id = rsp->id;
+ a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
+ &req);
+
+ skb_pull(skb, sizeof(*rsp));
+ return 0;
+}
+
+static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_amp_assoc_req *req = (void *) skb->data;
+ struct hci_dev *hdev;
+ struct amp_mgr *tmp;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+	/* Make sure that no other local assoc read request is being processed */
+ tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->amp_type == HCI_BREDR || tmp) {
+ struct a2mp_amp_assoc_rsp rsp;
+ rsp.id = req->id;
+
+ if (tmp) {
+ rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+ amp_mgr_put(tmp);
+ } else {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ }
+
+ a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+
+ goto done;
+ }
+
+ amp_read_loc_assoc(hdev, mgr);
+
+done:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct hci_dev *hdev;
+ struct amp_ctrl *ctrl;
+ struct hci_conn *hcon;
+ size_t assoc_len;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ assoc_len = len - sizeof(*rsp);
+
+ BT_DBG("id %d status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
+ assoc_len);
+
+ if (rsp->status)
+ return -EINVAL;
+
+ /* Save remote ASSOC data */
+ ctrl = amp_ctrl_lookup(mgr, rsp->id);
+ if (ctrl) {
+ u8 *assoc;
+
+ assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
+ if (!assoc) {
+ amp_ctrl_put(ctrl);
+ return -ENOMEM;
+ }
+
+ ctrl->assoc = assoc;
+ ctrl->assoc_len = assoc_len;
+ ctrl->assoc_rem_len = assoc_len;
+ ctrl->assoc_len_so_far = 0;
+
+ amp_ctrl_put(ctrl);
+ }
+
+ /* Create Phys Link */
+ hdev = hci_dev_get(rsp->id);
+ if (!hdev)
+ return -EINVAL;
+
+ hcon = phylink_add(hdev, mgr, rsp->id, true);
+ if (!hcon)
+ goto done;
+
+ BT_DBG("Created hcon %p: loc:%d -> rem:%d", hcon, hdev->id, rsp->id);
+
+ mgr->bredr_chan->remote_amp_id = rsp->id;
+
+ amp_create_phylink(hdev, mgr, hcon);
+
+done:
+ hci_dev_put(hdev);
+ skb_pull(skb, len);
+ return 0;
+}
+
+static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+ struct hci_conn *hcon;
+ struct amp_ctrl *ctrl;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+
+ hdev = hci_dev_get(req->remote_id);
+ if (!hdev || hdev->amp_type != HCI_AMP) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
+ if (!ctrl) {
+ ctrl = amp_ctrl_add(mgr, rsp.remote_id);
+ if (ctrl) {
+ amp_ctrl_get(ctrl);
+ } else {
+ rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+ goto send_rsp;
+ }
+ }
+
+ if (ctrl) {
+ size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
+ u8 *assoc;
+
+ assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
+ if (!assoc) {
+ amp_ctrl_put(ctrl);
+ return -ENOMEM;
+ }
+
+ ctrl->assoc = assoc;
+ ctrl->assoc_len = assoc_len;
+ ctrl->assoc_rem_len = assoc_len;
+ ctrl->assoc_len_so_far = 0;
+
+ amp_ctrl_put(ctrl);
+ }
+
+ hcon = phylink_add(hdev, mgr, req->local_id, false);
+ if (hcon) {
+ amp_accept_phylink(hdev, mgr, hcon);
+ rsp.status = A2MP_STATUS_SUCCESS;
+ } else {
+ rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+ }
+
+send_rsp:
+ if (hdev)
+ hci_dev_put(hdev);
+
+	/* Reply with an error now; the success response is sent only after the
+	   HCI Write Remote AMP Assoc command completes with success status */
+ if (rsp.status != A2MP_STATUS_SUCCESS) {
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
+ sizeof(rsp), &rsp);
+ } else {
+ set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
+ mgr->ident = hdr->ident;
+ }
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+ struct hci_conn *hcon;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+ rsp.status = A2MP_STATUS_SUCCESS;
+
+ hdev = hci_dev_get(req->remote_id);
+ if (!hdev) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst);
+ if (!hcon) {
+		BT_ERR("No phys link exists");
+ rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
+ goto clean;
+ }
+
+ /* TODO Disconnect Phys Link here */
+
+clean:
+ hci_dev_put(hdev);
+
+send_rsp:
+ a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+/* Handle A2MP signalling */
+static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct a2mp_cmd *hdr;
+ struct amp_mgr *mgr = chan->data;
+ int err = 0;
+
+ amp_mgr_get(mgr);
+
+ while (skb->len >= sizeof(*hdr)) {
+ u16 len;
+
+ hdr = (void *) skb->data;
+ len = le16_to_cpu(hdr->len);
+
+ BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
+
+ skb_pull(skb, sizeof(*hdr));
+
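+		/* The advertised PDU length must fit in the remaining data and the ident must be non-zero */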
+ if (len > skb->len || !hdr->ident) {
+ err = -EINVAL;
+ break;
+ }
+
+ mgr->ident = hdr->ident;
+
+ switch (hdr->code) {
+ case A2MP_COMMAND_REJ:
+ a2mp_command_rej(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCOVER_REQ:
+ err = a2mp_discover_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_NOTIFY:
+ err = a2mp_change_notify(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETINFO_REQ:
+ err = a2mp_getinfo_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETAMPASSOC_REQ:
+ err = a2mp_getampassoc_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CREATEPHYSLINK_REQ:
+ err = a2mp_createphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCONNPHYSLINK_REQ:
+ err = a2mp_discphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCOVER_RSP:
+ err = a2mp_discover_rsp(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETINFO_RSP:
+ err = a2mp_getinfo_rsp(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETAMPASSOC_RSP:
+ err = a2mp_getampassoc_rsp(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_RSP:
+ case A2MP_CREATEPHYSLINK_RSP:
+ case A2MP_DISCONNPHYSLINK_RSP:
+ err = a2mp_cmd_rsp(mgr, skb, hdr);
+ break;
+
+ default:
+ BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (err) {
+ struct a2mp_cmd_rej rej;
+
+ rej.reason = __constant_cpu_to_le16(0);
+ hdr = (void *) skb->data;
+
+ BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
+
+ a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
+ &rej);
+ }
+
+	/* Always free the skb and return success to prevent an L2CAP
+	   Disconnect from being sent over the A2MP channel */
+ kfree_skb(skb);
+
+ amp_mgr_put(mgr);
+
+ return 0;
+}
+
+static void a2mp_chan_close_cb(struct l2cap_chan *chan)
+{
+ l2cap_chan_put(chan);
+}
+
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+{
+ struct amp_mgr *mgr = chan->data;
+
+ if (!mgr)
+ return;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(state));
+
+ chan->state = state;
+
+ switch (state) {
+ case BT_CLOSED:
+ if (mgr)
+ amp_mgr_put(mgr);
+ break;
+ }
+}
+
+static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb)
+{
+ return bt_skb_alloc(len, GFP_KERNEL);
+}
+
+static struct l2cap_ops a2mp_chan_ops = {
+ .name = "L2CAP A2MP channel",
+ .recv = a2mp_chan_recv_cb,
+ .close = a2mp_chan_close_cb,
+ .state_change = a2mp_chan_state_change_cb,
+ .alloc_skb = a2mp_chan_alloc_skb_cb,
+
+ /* Not implemented for A2MP */
+ .new_connection = l2cap_chan_no_new_connection,
+ .teardown = l2cap_chan_no_teardown,
+ .ready = l2cap_chan_no_ready,
+ .defer = l2cap_chan_no_defer,
+};
+
+static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
+{
+ struct l2cap_chan *chan;
+ int err;
+
+ chan = l2cap_chan_create();
+ if (!chan)
+ return NULL;
+
+ BT_DBG("chan %p", chan);
+
+ chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+ chan->ops = &a2mp_chan_ops;
+
+ l2cap_chan_set_defaults(chan);
+ chan->remote_max_tx = chan->max_tx;
+ chan->remote_tx_win = chan->tx_win;
+
+ chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ skb_queue_head_init(&chan->tx_q);
+
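+	/* The A2MP fixed channel runs in Enhanced Retransmission Mode */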
+ chan->mode = L2CAP_MODE_ERTM;
+
+ err = l2cap_ertm_init(chan);
+ if (err < 0) {
+ l2cap_chan_del(chan, 0);
+ return NULL;
+ }
+
+ chan->conf_state = 0;
+
+ if (locked)
+ __l2cap_chan_add(conn, chan);
+ else
+ l2cap_chan_add(conn, chan);
+
+ chan->remote_mps = chan->omtu;
+ chan->mps = chan->omtu;
+
+ chan->state = BT_CONNECTED;
+
+ return chan;
+}
+
+/* AMP Manager functions */
+struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ kref_get(&mgr->kref);
+
+ return mgr;
+}
+
+static void amp_mgr_destroy(struct kref *kref)
+{
+ struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
+
+ BT_DBG("mgr %p", mgr);
+
+	mutex_lock(&amp_mgr_list_lock);
+	list_del(&mgr->list);
+	mutex_unlock(&amp_mgr_list_lock);
+
+ amp_ctrl_list_flush(mgr);
+ kfree(mgr);
+}
+
+int amp_mgr_put(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+	return kref_put(&mgr->kref, &amp_mgr_destroy);
+}
+
+static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
+{
+ struct amp_mgr *mgr;
+ struct l2cap_chan *chan;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ BT_DBG("conn %p mgr %p", conn, mgr);
+
+ mgr->l2cap_conn = conn;
+
+ chan = a2mp_chan_open(conn, locked);
+ if (!chan) {
+ kfree(mgr);
+ return NULL;
+ }
+
+ mgr->a2mp_chan = chan;
+ chan->data = mgr;
+
+ conn->hcon->amp_mgr = mgr;
+
+ kref_init(&mgr->kref);
+
+ /* Remote AMP ctrl list initialization */
+ INIT_LIST_HEAD(&mgr->amp_ctrls);
+ mutex_init(&mgr->amp_ctrls_lock);
+
+	mutex_lock(&amp_mgr_list_lock);
+	list_add(&mgr->list, &amp_mgr_list);
+	mutex_unlock(&amp_mgr_list_lock);
+
+ return mgr;
+}
+
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct amp_mgr *mgr;
+
+ mgr = amp_mgr_create(conn, false);
+ if (!mgr) {
+ BT_ERR("Could not create AMP manager");
+ return NULL;
+ }
+
+ BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
+
+ return mgr->a2mp_chan;
+}
+
+struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
+{
+ struct amp_mgr *mgr;
+
+	mutex_lock(&amp_mgr_list_lock);
+	list_for_each_entry(mgr, &amp_mgr_list, list) {
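+		/* test_and_clear_bit() atomically claims the state so only one caller handles it */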
+ if (test_and_clear_bit(state, &mgr->state)) {
+ amp_mgr_get(mgr);
+			mutex_unlock(&amp_mgr_list_lock);
+ return mgr;
+ }
+ }
+	mutex_unlock(&amp_mgr_list_lock);
+
+ return NULL;
+}
+
+void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
+{
+ struct amp_mgr *mgr;
+ struct a2mp_info_rsp rsp;
+
+ mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
+ if (!mgr)
+ return;
+
+ BT_DBG("%s mgr %p", hdev->name, mgr);
+
+ rsp.id = hdev->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ if (hdev->amp_type != HCI_BREDR) {
+ rsp.status = 0;
+ rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+ rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+ rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+ rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+ rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+ }
+
+ a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
+ amp_mgr_put(mgr);
+}
+
+void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
+{
+ struct amp_mgr *mgr;
+ struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+ struct a2mp_amp_assoc_rsp *rsp;
+ size_t len;
+
+ mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+ if (!mgr)
+ return;
+
+ BT_DBG("%s mgr %p", hdev->name, mgr);
+
+ len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
+ rsp = kzalloc(len, GFP_KERNEL);
+ if (!rsp) {
+ amp_mgr_put(mgr);
+ return;
+ }
+
+ rsp->id = hdev->id;
+
+ if (status) {
+ rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
+ } else {
+ rsp->status = A2MP_STATUS_SUCCESS;
+ memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
+ }
+
+ a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
+ amp_mgr_put(mgr);
+ kfree(rsp);
+}
+
+void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
+{
+ struct amp_mgr *mgr;
+ struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+ struct a2mp_physlink_req *req;
+ struct l2cap_chan *bredr_chan;
+ size_t len;
+
+ mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
+ if (!mgr)
+ return;
+
+ len = sizeof(*req) + loc_assoc->len;
+
+ BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
+
+ req = kzalloc(len, GFP_KERNEL);
+ if (!req) {
+ amp_mgr_put(mgr);
+ return;
+ }
+
+ bredr_chan = mgr->bredr_chan;
+ if (!bredr_chan)
+ goto clean;
+
+ req->local_id = hdev->id;
+ req->remote_id = bredr_chan->remote_amp_id;
+ memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
+
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
+
+clean:
+ amp_mgr_put(mgr);
+ kfree(req);
+}
+
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
+{
+ struct amp_mgr *mgr;
+ struct a2mp_physlink_rsp rsp;
+ struct hci_conn *hs_hcon;
+
+ mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
+ if (!mgr)
+ return;
+
+ hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
+ if (!hs_hcon) {
+ rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+ } else {
+ rsp.remote_id = hs_hcon->remote_id;
+ rsp.status = A2MP_STATUS_SUCCESS;
+ }
+
+ BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
+ status);
+
+ rsp.local_id = hdev->id;
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
+ amp_mgr_put(mgr);
+}
+
+void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ struct a2mp_discov_req req;
+
+ BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
+
+ if (!mgr) {
+ mgr = amp_mgr_create(conn, true);
+ if (!mgr)
+ return;
+ }
+
+ mgr->bredr_chan = chan;
+
+ req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ req.ext_feat = 0;
+ a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
+}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 7f260fc..d3dfb53 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,24 +25,11 @@
/* Bluetooth address family and sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
#include <asm/ioctls.h>
-#include <linux/kmod.h>
#include <net/bluetooth/bluetooth.h>
-
-#ifdef CONFIG_ANDROID_PARANOID_NETWORK
-#include <linux/android_aid.h>
-#endif
+#include <linux/proc_fs.h>
+#include <linux/uidgid.h>
#ifndef CONFIG_BT_SOCK_DEBUG
#undef BT_DBG
@@ -80,19 +67,16 @@
"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
-static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
+void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
- struct sock *sk = sock->sk;
-
- if (!sk)
- return;
-
+ BUG_ON(!sk);
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk,
bt_slock_key_strings[proto], &bt_slock_key[proto],
bt_key_strings[proto], &bt_lock_key[proto]);
}
+EXPORT_SYMBOL(bt_sock_reclassify_lock);
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
@@ -114,35 +98,26 @@
}
EXPORT_SYMBOL(bt_sock_register);
-int bt_sock_unregister(int proto)
+void bt_sock_unregister(int proto)
{
- int err = 0;
-
if (proto < 0 || proto >= BT_MAX_PROTO)
- return -EINVAL;
+ return;
write_lock(&bt_proto_lock);
-
- if (!bt_proto[proto])
- err = -ENOENT;
- else
- bt_proto[proto] = NULL;
-
+ bt_proto[proto] = NULL;
write_unlock(&bt_proto_lock);
-
- return err;
}
EXPORT_SYMBOL(bt_sock_unregister);
-#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#ifdef CONFIG_PARANOID_NETWORK
static inline int current_has_bt_admin(void)
{
- return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
+ return !current_euid();
}
static inline int current_has_bt(void)
{
- return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
+ return current_has_bt_admin();
}
# else
static inline int current_has_bt_admin(void)
@@ -183,7 +158,8 @@
if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
err = bt_proto[proto]->create(net, sock, proto, kern);
- bt_sock_reclassify_lock(sock, proto);
+ if (!err)
+ bt_sock_reclassify_lock(sock->sk, proto);
module_put(bt_proto[proto]->owner);
}
@@ -194,17 +170,17 @@
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
- write_lock_bh(&l->lock);
+ write_lock(&l->lock);
sk_add_node(sk, &l->head);
- write_unlock_bh(&l->lock);
+ write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
- write_lock_bh(&l->lock);
+ write_lock(&l->lock);
sk_del_node_init(sk);
- write_unlock_bh(&l->lock);
+ write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);
@@ -237,33 +213,30 @@
BT_DBG("parent %p", parent);
- local_bh_disable();
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
- bh_lock_sock(sk);
+ lock_sock(sk);
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
- bh_unlock_sock(sk);
+ release_sock(sk);
bt_accept_unlink(sk);
continue;
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
- bt_sk(parent)->defer_setup) {
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return sk;
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
- local_bh_enable();
return NULL;
}
@@ -290,8 +263,6 @@
return err;
}
- msg->msg_namelen = 0;
-
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
@@ -451,15 +422,16 @@
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
- (bt_sk(parent)->defer_setup &&
- sk->sk_state == BT_CONNECT2))
+ (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
+ sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
return 0;
}
-unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
@@ -472,7 +444,8 @@
return bt_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= POLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
@@ -491,7 +464,7 @@
sk->sk_state == BT_CONFIG)
return mask;
- if (sock_writeable(sk))
+ if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -556,9 +529,8 @@
BT_DBG("sk %p", sk);
add_wait_queue(sk_sleep(sk), &wait);
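+	/* Arm TASK_INTERRUPTIBLE before testing the condition so a wakeup
+	   between the check and schedule_timeout() is not lost */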
+ set_current_state(TASK_INTERRUPTIBLE);
while (sk->sk_state != state) {
- set_current_state(TASK_INTERRUPTIBLE);
-
if (!timeo) {
err = -EINPROGRESS;
break;
@@ -572,17 +544,148 @@
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
break;
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);
+#ifdef CONFIG_PROC_FS
+struct bt_seq_state {
+ struct bt_sock_list *l;
+};
+
+static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(seq->private->l->lock)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ read_lock(&l->lock);
+ return seq_hlist_start_head(&l->head, *pos);
+}
+
+static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ return seq_hlist_next(v, &l->head, pos);
+}
+
+static void bt_seq_stop(struct seq_file *seq, void *v)
+ __releases(seq->private->l->lock)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ read_unlock(&l->lock);
+}
+
+static int bt_seq_show(struct seq_file *seq, void *v)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "sk RefCnt Rmem Wmem Inode Src Dst Parent");
+
+ if (l->custom_seq_show) {
+ seq_putc(seq, ' ');
+ l->custom_seq_show(seq, v);
+ }
+
+ seq_putc(seq, '\n');
+ } else {
+ struct sock *sk = sk_entry(v);
+ struct bt_sock *bt = bt_sk(sk);
+
+ seq_printf(seq,
+ "%pK %-6d %-6u %-6u %-6lu %pMR %pMR %-6lu",
+ sk,
+ atomic_read(&sk->sk_refcnt),
+ sk_rmem_alloc_get(sk),
+ sk_wmem_alloc_get(sk),
+ sock_i_ino(sk),
+ &bt->src,
+ &bt->dst,
+			   bt->parent ? sock_i_ino(bt->parent) : 0LU);
+
+ if (l->custom_seq_show) {
+ seq_putc(seq, ' ');
+ l->custom_seq_show(seq, v);
+ }
+
+ seq_putc(seq, '\n');
+ }
+ return 0;
+}
+
+static struct seq_operations bt_seq_ops = {
+ .start = bt_seq_start,
+ .next = bt_seq_next,
+ .stop = bt_seq_stop,
+ .show = bt_seq_show,
+};
+
+static int bt_seq_open(struct inode *inode, struct file *file)
+{
+ struct bt_sock_list *sk_list;
+ struct bt_seq_state *s;
+
+ sk_list = PDE_DATA(inode);
+ s = __seq_open_private(file, &bt_seq_ops,
+ sizeof(struct bt_seq_state));
+ if (!s)
+ return -ENOMEM;
+
+ s->l = sk_list;
+ return 0;
+}
+
+static const struct file_operations bt_fops = {
+ .open = bt_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+int bt_procfs_init(struct net *net, const char *name,
+			struct bt_sock_list *sk_list,
+			int (*seq_show)(struct seq_file *, void *))
+{
+ sk_list->custom_seq_show = seq_show;
+
+ if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
+ return -ENOMEM;
+ return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+ remove_proc_entry(name, net->proc_net);
+}
+#else
+int bt_procfs_init(struct net *net, const char *name,
+			struct bt_sock_list *sk_list,
+			int (*seq_show)(struct seq_file *, void *))
+{
+ return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+}
+#endif
+EXPORT_SYMBOL(bt_procfs_init);
+EXPORT_SYMBOL(bt_procfs_cleanup);
+
static struct net_proto_family bt_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_BLUETOOTH,
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index c19be91..a3f3380 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2010-2012 The Linux Foundation. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
@@ -11,2031 +11,456 @@
GNU General Public License for more details.
*/
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/timer.h>
-
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <linux/err.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
#include <crypto/hash.h>
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/amp.h>
-
-static struct workqueue_struct *amp_workqueue;
-
-LIST_HEAD(amp_mgr_list);
-DEFINE_RWLOCK(amp_mgr_list_lock);
-
-static int send_a2mp(struct socket *sock, u8 *data, int len);
-
-static void ctx_timeout(unsigned long data);
-
-static void launch_ctx(struct amp_mgr *mgr);
-static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
-static int kill_ctx(struct amp_ctx *ctx);
-static int cancel_ctx(struct amp_ctx *ctx);
-
-static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
-
-static void remove_amp_mgr(struct amp_mgr *mgr)
+/* Remote AMP Controllers interface */
+void amp_ctrl_get(struct amp_ctrl *ctrl)
{
- BT_DBG("mgr %p", mgr);
+ BT_DBG("ctrl %p orig refcnt %d", ctrl,
+ atomic_read(&ctrl->kref.refcount));
-	write_lock(&amp_mgr_list_lock);
-	list_del(&mgr->list);
-	write_unlock(&amp_mgr_list_lock);
-
- read_lock(&mgr->ctx_list_lock);
- while (!list_empty(&mgr->ctx_list)) {
- struct amp_ctx *ctx;
- ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
- read_unlock(&mgr->ctx_list_lock);
- BT_DBG("kill ctx %p", ctx);
- kill_ctx(ctx);
- read_lock(&mgr->ctx_list_lock);
- }
- read_unlock(&mgr->ctx_list_lock);
-
- kfree(mgr->ctrls);
-
- kfree(mgr);
+ kref_get(&ctrl->kref);
}
-static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
+static void amp_ctrl_destroy(struct kref *kref)
{
- struct amp_mgr *mgr;
- struct amp_mgr *found = NULL;
+ struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
-	read_lock(&amp_mgr_list_lock);
-	list_for_each_entry(mgr, &amp_mgr_list, list) {
- if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
- found = mgr;
- break;
- }
- }
-	read_unlock(&amp_mgr_list_lock);
- return found;
+ BT_DBG("ctrl %p", ctrl);
+
+ kfree(ctrl->assoc);
+ kfree(ctrl);
}
-static struct amp_mgr *get_create_amp_mgr(struct hci_conn *hcon,
- struct sk_buff *skb)
+int amp_ctrl_put(struct amp_ctrl *ctrl)
{
- struct amp_mgr *mgr;
+ BT_DBG("ctrl %p orig refcnt %d", ctrl,
+ atomic_read(&ctrl->kref.refcount));
-	write_lock(&amp_mgr_list_lock);
-	list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (mgr->l2cap_conn == hcon->l2cap_data) {
- BT_DBG("found %p", mgr);
-			write_unlock(&amp_mgr_list_lock);
- goto gc_finished;
- }
- }
-	write_unlock(&amp_mgr_list_lock);
-
- mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
- if (!mgr)
- return NULL;
-
- mgr->l2cap_conn = hcon->l2cap_data;
- mgr->next_ident = 1;
- INIT_LIST_HEAD(&mgr->ctx_list);
- rwlock_init(&mgr->ctx_list_lock);
- mgr->skb = skb;
- BT_DBG("hcon %p mgr %p", hcon, mgr);
- mgr->a2mp_sock = open_fixed_channel(&hcon->hdev->bdaddr, &hcon->dst);
- if (!mgr->a2mp_sock) {
- kfree(mgr);
- return NULL;
- }
-	write_lock(&amp_mgr_list_lock);
-	list_add(&(mgr->list), &amp_mgr_list);
-	write_unlock(&amp_mgr_list_lock);
-
-gc_finished:
- return mgr;
+	return kref_put(&ctrl->kref, &amp_ctrl_destroy);
}
-static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
-{
- if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
- return mgr->ctrls;
- else
- return NULL;
-}
-
-static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
+struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
{
struct amp_ctrl *ctrl;
- BT_DBG("mgr %p, id %d", mgr, id);
- if ((mgr->ctrls) && (mgr->ctrls->id == id))
- ctrl = mgr->ctrls;
- else {
- kfree(mgr->ctrls);
- ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
- if (ctrl) {
- ctrl->mgr = mgr;
- ctrl->id = id;
- }
- mgr->ctrls = ctrl;
- }
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return NULL;
+
+ kref_init(&ctrl->kref);
+ ctrl->id = id;
+
+ mutex_lock(&mgr->amp_ctrls_lock);
+ list_add(&ctrl->list, &mgr->amp_ctrls);
+ mutex_unlock(&mgr->amp_ctrls_lock);
+
+ BT_DBG("mgr %p ctrl %p", mgr, ctrl);
return ctrl;
}
-static struct amp_ctx *create_ctx(u8 type, u8 state)
+void amp_ctrl_list_flush(struct amp_mgr *mgr)
{
- struct amp_ctx *ctx = NULL;
+ struct amp_ctrl *ctrl, *n;
- ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
- if (ctx) {
- ctx->type = type;
- ctx->state = state;
- init_timer(&(ctx->timer));
- ctx->timer.function = ctx_timeout;
- ctx->timer.data = (unsigned long) ctx;
- }
- BT_DBG("ctx %p, type %d", ctx, type);
- return ctx;
-}
-
-static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
-{
- BT_DBG("ctx %p", ctx);
- write_lock(&mgr->ctx_list_lock);
- list_add(&ctx->list, &mgr->ctx_list);
- write_unlock(&mgr->ctx_list_lock);
- ctx->mgr = mgr;
- execute_ctx(ctx, AMP_INIT, 0);
-}
-
-static void destroy_ctx(struct amp_ctx *ctx)
-{
- struct amp_mgr *mgr = ctx->mgr;
-
- BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
- del_timer(&ctx->timer);
- write_lock(&mgr->ctx_list_lock);
- list_del(&ctx->list);
- write_unlock(&mgr->ctx_list_lock);
- if (ctx->deferred)
- execute_ctx(ctx->deferred, AMP_INIT, 0);
- kfree(ctx);
-}
-
-static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
-{
- struct amp_ctx *fnd = NULL;
- struct amp_ctx *ctx;
-
- read_lock(&mgr->ctx_list_lock);
- list_for_each_entry(ctx, &mgr->ctx_list, list) {
- if (ctx->type == type) {
- fnd = ctx;
- break;
- }
- }
- read_unlock(&mgr->ctx_list_lock);
- return fnd;
-}
-
-static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
-{
- struct amp_mgr *mgr = cur->mgr;
- struct amp_ctx *fnd = NULL;
- struct amp_ctx *ctx;
-
- read_lock(&mgr->ctx_list_lock);
- list_for_each_entry(ctx, &mgr->ctx_list, list) {
- if ((ctx->type == type) && (ctx != cur)) {
- fnd = ctx;
- break;
- }
- }
- read_unlock(&mgr->ctx_list_lock);
- return fnd;
-}
-
-static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
-{
- struct amp_ctx *fnd = NULL;
- struct amp_ctx *ctx;
-
- read_lock(&mgr->ctx_list_lock);
- list_for_each_entry(ctx, &mgr->ctx_list, list) {
- if ((ctx->evt_type & AMP_A2MP_RSP) &&
- (ctx->rsp_ident == ident)) {
- fnd = ctx;
- break;
- }
- }
- read_unlock(&mgr->ctx_list_lock);
- return fnd;
-}
-
-static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
- u16 evt_value)
-{
- struct amp_mgr *mgr;
- struct amp_ctx *fnd = NULL;
-
-	read_lock(&amp_mgr_list_lock);
-	list_for_each_entry(mgr, &amp_mgr_list, list) {
- struct amp_ctx *ctx;
- read_lock(&mgr->ctx_list_lock);
- list_for_each_entry(ctx, &mgr->ctx_list, list) {
- struct hci_dev *ctx_hdev;
- ctx_hdev = hci_dev_get(ctx->id);
- if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
- switch (evt_type) {
- case AMP_HCI_CMD_STATUS:
- case AMP_HCI_CMD_CMPLT:
- if (ctx->opcode == evt_value)
- fnd = ctx;
- break;
- case AMP_HCI_EVENT:
- if (ctx->evt_code == (u8) evt_value)
- fnd = ctx;
- break;
- }
- }
- if (ctx_hdev)
- hci_dev_put(ctx_hdev);
-
- if (fnd)
- break;
- }
- read_unlock(&mgr->ctx_list_lock);
- }
-	read_unlock(&amp_mgr_list_lock);
- return fnd;
-}
-
-static inline u8 next_ident(struct amp_mgr *mgr)
-{
- if (++mgr->next_ident == 0)
- mgr->next_ident = 1;
- return mgr->next_ident;
-}
-
-static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
- u16 len, void *data, u16 len2, void *data2)
-{
- struct a2mp_cmd_hdr *hdr;
- int plen;
- u8 *p, *cmd;
-
- BT_DBG("ident %d code 0x%02x", ident, code);
- if (!mgr->a2mp_sock)
- return;
- plen = sizeof(*hdr) + len + len2;
- cmd = kzalloc(plen, GFP_ATOMIC);
- if (!cmd)
- return;
- hdr = (struct a2mp_cmd_hdr *) cmd;
- hdr->code = code;
- hdr->ident = ident;
- hdr->len = cpu_to_le16(len+len2);
- p = cmd + sizeof(*hdr);
- memcpy(p, data, len);
- p += len;
- memcpy(p, data2, len2);
- send_a2mp(mgr->a2mp_sock, cmd, plen);
- kfree(cmd);
-}
-
-static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
- u8 code, u16 len, void *data)
-{
- send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
-}
-
-static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
-{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- struct a2mp_cmd_rej *rej;
- struct amp_ctx *ctx;
-
- BT_DBG("ident %d code %d", hdr->ident, hdr->code);
- rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
- if (skb->len < sizeof(*rej))
- return -EINVAL;
- BT_DBG("reason %d", le16_to_cpu(rej->reason));
- ctx = get_ctx_a2mp(mgr, hdr->ident);
- if (ctx)
- kill_ctx(ctx);
- skb_pull(skb, sizeof(*rej));
- return 0;
-}
-
-static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
- void *msg)
-{
- struct a2mp_cl clist[16];
- struct a2mp_cl *cl;
- struct hci_dev *hdev;
- int num_ctrls = 1, id;
-
- cl = clist;
- cl->id = 0;
- cl->type = 0;
- cl->status = 1;
-
- for (id = 0; id < 16; ++id) {
- hdev = hci_dev_get(id);
- if (hdev) {
- if ((hdev->amp_type != HCI_BREDR) &&
- test_bit(HCI_UP, &hdev->flags)) {
- (cl + num_ctrls)->id = hdev->id;
- (cl + num_ctrls)->type = hdev->amp_type;
- (cl + num_ctrls)->status = hdev->amp_status;
- ++num_ctrls;
- }
- hci_dev_put(hdev);
- }
- }
- send_a2mp_cmd2(mgr, ident, code, len, msg,
- num_ctrls*sizeof(*cl), clist);
-
- return 0;
-}
-
-static void send_a2mp_change_notify(void)
-{
- struct amp_mgr *mgr;
-
-	list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (mgr->discovered)
- send_a2mp_cl(mgr, next_ident(mgr),
- A2MP_CHANGE_NOTIFY, 0, NULL);
- }
-}
-
-static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
-{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- struct a2mp_discover_req *req;
- u16 *efm;
- struct a2mp_discover_rsp rsp;
-
- req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
- if (skb->len < sizeof(*req))
- return -EINVAL;
- efm = (u16 *) skb_pull(skb, sizeof(*req));
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
- le16_to_cpu(req->ext_feat));
-
- while (le16_to_cpu(req->ext_feat) & 0x8000) {
- if (skb->len < sizeof(*efm))
- return -EINVAL;
- req->ext_feat = *efm;
- BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
- efm = (u16 *) skb_pull(skb, sizeof(*efm));
- }
-
- rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- rsp.ext_feat = 0;
-
- mgr->discovered = 1;
-
- return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
- sizeof(rsp), &rsp);
-}
-
-static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
-{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- struct a2mp_cl *cl;
-
- cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
- while (skb->len >= sizeof(*cl)) {
- struct amp_ctrl *ctrl;
- if (cl->id != 0) {
- ctrl = get_create_ctrl(mgr, cl->id);
- if (ctrl != NULL) {
- ctrl->type = cl->type;
- ctrl->status = cl->status;
- }
- }
- cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
- }
-
- /* TODO find controllers in manager that were not on received */
- /* controller list and destroy them */
- send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);
-
- return 0;
-}
-
-static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
-{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- u8 *data;
- int id;
- struct hci_dev *hdev;
- struct a2mp_getinfo_rsp rsp;
-
- data = (u8 *) skb_pull(skb, sizeof(*hdr));
- if (le16_to_cpu(hdr->len) < sizeof(*data))
- return -EINVAL;
- if (skb->len < sizeof(*data))
- return -EINVAL;
- id = *data;
- skb_pull(skb, sizeof(*data));
- rsp.id = id;
- rsp.status = 1;
-
- BT_DBG("id %d", id);
- hdev = hci_dev_get(id);
-
- if (hdev && hdev->amp_type != HCI_BREDR) {
- rsp.status = 0;
- rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
- rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
- rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
- rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
- rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
- }
-
- send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);
-
- if (hdev)
- hci_dev_put(hdev);
-
- return 0;
-}
-
-static void create_physical(struct l2cap_conn *conn, struct sock *sk)
-{
- struct amp_mgr *mgr;
- struct amp_ctx *ctx = NULL;
-
- BT_DBG("conn %p", conn);
- mgr = get_create_amp_mgr(conn->hcon, NULL);
- if (!mgr)
- goto cp_finished;
BT_DBG("mgr %p", mgr);
- ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
- if (!ctx)
- goto cp_finished;
- ctx->sk = sk;
- sock_hold(sk);
- start_ctx(mgr, ctx);
- return;
-cp_finished:
- l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
-}
-
-static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
-{
- struct amp_mgr *mgr;
- struct hci_dev *hdev;
- struct hci_conn *conn;
- struct amp_ctx *aplctx = NULL;
- u8 remote_id = 0;
- int result = -EINVAL;
-
- BT_DBG("lcon %p", lcon);
- hdev = hci_dev_get(id);
- if (!hdev)
- goto ap_finished;
- BT_DBG("hdev %p", hdev);
- mgr = get_create_amp_mgr(lcon->hcon, NULL);
- if (!mgr)
- goto ap_finished;
- BT_DBG("mgr %p", mgr);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &mgr->l2cap_conn->hcon->dst);
- if (conn) {
- BT_DBG("conn %p", hdev);
- result = 0;
- remote_id = conn->dst_id;
- goto ap_finished;
+ mutex_lock(&mgr->amp_ctrls_lock);
+ list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
+ list_del(&ctrl->list);
+ amp_ctrl_put(ctrl);
}
- aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
- if (!aplctx)
- goto ap_finished;
- aplctx->sk = sk;
- sock_hold(sk);
- return;
-
-ap_finished:
- if (hdev)
- hci_dev_put(hdev);
- l2cap_amp_physical_complete(result, id, remote_id, sk);
+ mutex_unlock(&mgr->amp_ctrls_lock);
}
-static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
+struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- struct amp_ctx *ctx;
- struct a2mp_getampassoc_req *req;
+ struct amp_ctrl *ctrl;
- if (hdr->len < sizeof(*req))
- return -EINVAL;
- req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
- skb_pull(skb, sizeof(*req));
+ BT_DBG("mgr %p id %d", mgr, id);
- ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
- if (!ctx)
- return -ENOMEM;
- ctx->id = req->id;
- ctx->d.gaa.req_ident = hdr->ident;
- ctx->hdev = hci_dev_get(ctx->id);
- if (ctx->hdev)
- ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
- GFP_ATOMIC);
- start_ctx(mgr, ctx);
- return 0;
-}
-
-static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
-{
- struct sk_buff *skb = (struct sk_buff *) data;
- struct hci_cp_read_local_amp_assoc cp;
- struct hci_rp_read_local_amp_assoc *rp;
- struct a2mp_getampassoc_rsp rsp;
- u16 rem_len;
- u16 frag_len;
-
- rsp.status = 1;
- if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
- goto gaa_finished;
-
- switch (ctx->state) {
- case AMP_GAA_INIT:
- ctx->state = AMP_GAA_RLAA_COMPLETE;
- ctx->evt_type = AMP_HCI_CMD_CMPLT;
- ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
- ctx->d.gaa.len_so_far = 0;
- cp.phy_handle = 0;
- cp.len_so_far = 0;
- cp.max_len = ctx->hdev->amp_assoc_size;
- hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
- break;
-
- case AMP_GAA_RLAA_COMPLETE:
- if (skb->len < 4)
- goto gaa_finished;
- rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
- if (rp->status)
- goto gaa_finished;
- rem_len = le16_to_cpu(rp->rem_len);
- skb_pull(skb, 4);
- frag_len = skb->len;
-
- if (ctx->d.gaa.len_so_far + rem_len <=
- ctx->hdev->amp_assoc_size) {
- struct hci_cp_read_local_amp_assoc cp;
- u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
- memcpy(assoc, rp->frag, frag_len);
- ctx->d.gaa.len_so_far += rem_len;
- rem_len -= frag_len;
- if (rem_len == 0) {
- rsp.status = 0;
- goto gaa_finished;
- }
- /* more assoc data to read */
- cp.phy_handle = 0;
- cp.len_so_far = ctx->d.gaa.len_so_far;
- cp.max_len = ctx->hdev->amp_assoc_size;
- hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+ mutex_lock(&mgr->amp_ctrls_lock);
+ list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
+ if (ctrl->id == id) {
+ amp_ctrl_get(ctrl);
+ mutex_unlock(&mgr->amp_ctrls_lock);
+ return ctrl;
}
- break;
-
- default:
- goto gaa_finished;
- break;
}
- return 0;
+ mutex_unlock(&mgr->amp_ctrls_lock);
-gaa_finished:
- rsp.id = ctx->id;
- send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
- sizeof(rsp), &rsp,
- ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
- kfree(ctx->d.gaa.assoc);
- if (ctx->hdev)
- hci_dev_put(ctx->hdev);
- return 1;
+ return NULL;
}
-struct hmac_sha256_result {
- struct completion completion;
- int err;
-};
-
-static void hmac_sha256_final(struct crypto_async_request *req, int err)
+/* Physical Link interface */
+static u8 __next_handle(struct amp_mgr *mgr)
{
- struct hmac_sha256_result *r = req->data;
- if (err == -EINPROGRESS)
- return;
- r->err = err;
- complete(&r->completion);
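+	/* Skip handle 0 when the counter wraps around */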
+ if (++mgr->handle == 0)
+ mgr->handle = 1;
+
+ return mgr->handle;
}
-int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
- u8 *output, u8 outlen)
+struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ u8 remote_id, bool out)
+{
+ bdaddr_t *dst = mgr->l2cap_conn->dst;
+ struct hci_conn *hcon;
+
+ hcon = hci_conn_add(hdev, AMP_LINK, 0, dst);
+ if (!hcon)
+ return NULL;
+
+ BT_DBG("hcon %p dst %pMR", hcon, dst);
+
+ hcon->state = BT_CONNECT;
+ hcon->attempt++;
+ hcon->handle = __next_handle(mgr);
+ hcon->remote_id = remote_id;
+ hcon->amp_mgr = amp_mgr_get(mgr);
+ hcon->out = out;
+
+ return hcon;
+}
+
+/* AMP crypto key generation interface */
+static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
{
int ret = 0;
- struct crypto_ahash *tfm;
- struct scatterlist sg;
- struct ahash_request *req;
- struct hmac_sha256_result tresult;
- void *hash_buff = NULL;
+ struct crypto_shash *tfm;
- unsigned char hash_result[64];
- int i;
+ if (!ksize)
+ return -EINVAL;
- memset(output, 0, outlen);
-
- init_completion(&tresult.completion);
-
- tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(tfm)) {
- BT_DBG("crypto_alloc_ahash failed");
- ret = PTR_ERR(tfm);
- goto err_tfm;
+		BT_DBG("crypto_alloc_shash failed: err %ld", PTR_ERR(tfm));
+ return PTR_ERR(tfm);
}
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- BT_DBG("failed to allocate request for hmac(sha256)");
- ret = -ENOMEM;
- goto err_req;
+ ret = crypto_shash_setkey(tfm, key, ksize);
+ if (ret) {
+		BT_DBG("crypto_shash_setkey failed: err %d", ret);
+ } else {
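+		/* Keep the shash descriptor and its transform state on the stack */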
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
+
+ desc.shash.tfm = tfm;
+ desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ret = crypto_shash_digest(&desc.shash, plaintext, psize,
+ output);
}
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- hmac_sha256_final, &tresult);
-
- hash_buff = kzalloc(psize, GFP_KERNEL);
- if (!hash_buff) {
- BT_DBG("failed to kzalloc hash_buff");
- ret = -ENOMEM;
- goto err_hash_buf;
- }
-
- memset(hash_result, 0, 64);
- memcpy(hash_buff, plaintext, psize);
- sg_init_one(&sg, hash_buff, psize);
-
- if (ksize) {
- crypto_ahash_clear_flags(tfm, ~0);
- ret = crypto_ahash_setkey(tfm, key, ksize);
-
- if (ret) {
- BT_DBG("crypto_ahash_setkey failed");
- goto err_setkey;
- }
- }
-
- ahash_request_set_crypt(req, &sg, hash_result, psize);
- ret = crypto_ahash_digest(req);
-
- BT_DBG("ret 0x%x", ret);
-
- switch (ret) {
- case 0:
- for (i = 0; i < outlen; i++)
- output[i] = hash_result[i];
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&tresult.completion);
- if (!ret && !tresult.err) {
- INIT_COMPLETION(tresult.completion);
- break;
- } else {
- BT_DBG("wait_for_completion_interruptible failed");
- if (!ret)
- ret = tresult.err;
- goto out;
- }
- default:
- goto out;
- }
-
-out:
-err_setkey:
- kfree(hash_buff);
-err_hash_buf:
- ahash_request_free(req);
-err_req:
- crypto_free_ahash(tfm);
-err_tfm:
+ crypto_free_shash(tfm);
return ret;
}
-static void show_key(u8 *k)
+int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
- int i = 0;
- for (i = 0; i < 32; i += 8)
- BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
- *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
- *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
-}
-
-static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
-{
- u8 bt2_key[32];
- u8 gamp_key[32];
- u8 b802_key[32];
- int result;
+ struct hci_dev *hdev = conn->hdev;
+ struct link_key *key;
+ u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
+ u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
+ int err;
if (!hci_conn_check_link_mode(conn))
return -EACCES;
- BT_DBG("key_type %d", conn->key_type);
- if (conn->key_type < 3)
+ BT_DBG("conn %p key_type %d", conn, conn->key_type);
+
+ /* Legacy key */
+ if (conn->key_type < 3) {
+ BT_ERR("Legacy key type %d", conn->key_type);
return -EACCES;
+ }
*type = conn->key_type;
- *len = 32;
- memcpy(&bt2_key[0], conn->link_key, 16);
- memcpy(&bt2_key[16], conn->link_key, 16);
- result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
- if (result)
- goto ps_finished;
+ *len = HCI_AMP_LINK_KEY_SIZE;
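+	/* Key chain: LK || LK -> Generic AMP key ("gamp") -> dedicated
+	   802.11 PAL key ("802b"); debug combination keys use gamp directly */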
- if (conn->key_type == 3) {
- BT_DBG("gamp_key");
- show_key(gamp_key);
- memcpy(data, gamp_key, 32);
- goto ps_finished;
+ key = hci_find_link_key(hdev, &conn->dst);
+ if (!key) {
+ BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
+ return -EACCES;
}
- result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
- if (result)
- goto ps_finished;
+	/* BR/EDR Link Key concatenated with itself */
+ memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
+ memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
- BT_DBG("802b_key");
- show_key(b802_key);
- memcpy(data, b802_key, 32);
-
-ps_finished:
- return result;
-}
-
-static u8 amp_next_handle;
-static inline u8 physlink_handle(struct hci_dev *hdev)
-{
- /* TODO amp_next_handle should be part of hci_dev */
- if (amp_next_handle == 0)
- amp_next_handle = 1;
- return amp_next_handle++;
-}
-
-/* Start an Accept Physical Link sequence */
-static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
-{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- struct amp_ctx *ctx = NULL;
- struct a2mp_createphyslink_req *req;
-
- if (hdr->len < sizeof(*req))
- return -EINVAL;
- req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
- skb_pull(skb, sizeof(*req));
- BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
-
- /* initialize the context */
- ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
- if (!ctx)
- return -ENOMEM;
- ctx->d.apl.req_ident = hdr->ident;
- ctx->d.apl.remote_id = req->local_id;
- ctx->id = req->remote_id;
-
- /* add the supplied remote assoc to the context */
- ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
- if (ctx->d.apl.remote_assoc)
- memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
- ctx->d.apl.len_so_far = 0;
- ctx->d.apl.rem_len = skb->len;
- skb_pull(skb, skb->len);
- ctx->hdev = hci_dev_get(ctx->id);
- start_ctx(mgr, ctx);
- return 0;
-}
-
-static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
-{
- struct sk_buff *skb = data;
- struct hci_cp_accept_phys_link acp;
- struct hci_cp_write_remote_amp_assoc wcp;
- struct hci_rp_write_remote_amp_assoc *wrp;
- struct hci_ev_cmd_status *cs = data;
- struct hci_ev_phys_link_complete *ev;
- struct a2mp_createphyslink_rsp rsp;
- struct amp_ctx *cplctx;
- struct amp_ctx *aplctx;
- u16 frag_len;
- struct hci_conn *conn;
- int result;
-
- BT_DBG("state %d", ctx->state);
- result = -EINVAL;
- rsp.status = 1; /* Invalid Controller ID */
- if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
- goto apl_finished;
- if (evt_type == AMP_KILLED) {
- result = -EAGAIN;
- rsp.status = 4; /* Disconnect request received */
- goto apl_finished;
- }
- if (!ctx->d.apl.remote_assoc) {
- result = -ENOMEM;
- rsp.status = 2; /* Unable to Start */
- goto apl_finished;
+ /* Derive Generic AMP Link Key (gamp) */
+ err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
+ if (err) {
+ BT_ERR("Could not derive Generic AMP Key: err %d", err);
+ return err;
}
- switch (ctx->state) {
- case AMP_APL_INIT:
- BT_DBG("local_id %d, remote_id %d",
- ctx->id, ctx->d.apl.remote_id);
- conn = hci_conn_hash_lookup_id(ctx->hdev,
- &ctx->mgr->l2cap_conn->hcon->dst,
- ctx->d.apl.remote_id);
- if (conn) {
- result = -EEXIST;
- rsp.status = 5; /* Already Exists */
- goto apl_finished;
- }
-
- aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
- if ((aplctx) &&
- (aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
- BT_DBG("deferred to %p", aplctx);
- aplctx->deferred = ctx;
- break;
- }
-
- cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
- if ((cplctx) &&
- (cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
- struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
- BT_DBG("local %s remote %s",
- batostr(&bcon->hdev->bdaddr),
- batostr(&bcon->dst));
- if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
- (bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
- BT_DBG("COLLISION LOSER");
- cplctx->deferred = ctx;
- cancel_ctx(cplctx);
- break;
- } else {
- BT_DBG("COLLISION WINNER");
- result = -EISCONN;
- rsp.status = 3; /* Collision */
- goto apl_finished;
- }
- }
-
- result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
- &acp.key_len, &acp.type);
- if (result) {
- BT_DBG("SECURITY");
- rsp.status = 6; /* Security Violation */
- goto apl_finished;
- }
-
- ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
- ctx->state = AMP_APL_APL_STATUS;
- ctx->evt_type = AMP_HCI_CMD_STATUS;
- ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
- acp.phy_handle = ctx->d.apl.phy_handle;
- hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
- break;
-
- case AMP_APL_APL_STATUS:
- if (cs->status != 0)
- goto apl_finished;
- /* PAL will accept link, send a2mp response */
- rsp.local_id = ctx->id;
- rsp.remote_id = ctx->d.apl.remote_id;
- rsp.status = 0;
- send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
- A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
-
- /* send the first assoc fragment */
- wcp.phy_handle = ctx->d.apl.phy_handle;
- wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
- wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
- frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
- memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
- ctx->state = AMP_APL_WRA_COMPLETE;
- ctx->evt_type = AMP_HCI_CMD_CMPLT;
- ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
- hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
- break;
-
- case AMP_APL_WRA_COMPLETE:
- /* received write remote amp assoc command complete event */
- wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
- if (wrp->status != 0)
- goto apl_finished;
- if (wrp->phy_handle != ctx->d.apl.phy_handle)
- goto apl_finished;
- /* update progress */
- frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
- ctx->d.apl.len_so_far += frag_len;
- ctx->d.apl.rem_len -= frag_len;
- if (ctx->d.apl.rem_len > 0) {
- u8 *assoc;
- /* another assoc fragment to send */
- wcp.phy_handle = ctx->d.apl.phy_handle;
- wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
- wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
- frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
- assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
- memcpy(wcp.frag, assoc, frag_len);
- hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
- break;
- }
- /* wait for physical link complete event */
- ctx->state = AMP_APL_PL_COMPLETE;
- ctx->evt_type = AMP_HCI_EVENT;
- ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
- break;
-
- case AMP_APL_PL_COMPLETE:
- /* physical link complete event received */
- if (skb->len < sizeof(*ev))
- goto apl_finished;
- ev = (struct hci_ev_phys_link_complete *) skb->data;
- if (ev->phy_handle != ctx->d.apl.phy_handle)
- break;
- if (ev->status != 0)
- goto apl_finished;
- conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
- if (!conn)
- goto apl_finished;
- result = 0;
- BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
- conn->dst_id = ctx->d.apl.remote_id;
- bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
- goto apl_finished;
- break;
-
- default:
- goto apl_finished;
- break;
+ if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
+ BT_DBG("Use Generic AMP Key (gamp)");
+ memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
+ return err;
}
- return 0;
-apl_finished:
- if (ctx->sk)
- l2cap_amp_physical_complete(result, ctx->id,
- ctx->d.apl.remote_id, ctx->sk);
- if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
- rsp.local_id = ctx->id;
- rsp.remote_id = ctx->d.apl.remote_id;
- send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
- A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
- }
- kfree(ctx->d.apl.remote_assoc);
- if (ctx->sk)
- sock_put(ctx->sk);
- if (ctx->hdev)
- hci_dev_put(ctx->hdev);
- return 1;
+ /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
+ return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
}
-static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
+void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
{
- struct hci_cp_disconn_phys_link dcp;
+ struct hci_cp_read_local_amp_assoc cp;
+ struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- ctx->state = AMP_CPL_PL_CANCEL;
- ctx->evt_type = AMP_HCI_EVENT;
- ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
- dcp.phy_handle = ctx->d.cpl.phy_handle;
- dcp.reason = reason;
- hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
+ BT_DBG("%s handle %d", hdev->name, phy_handle);
+
+ cp.phy_handle = phy_handle;
+ cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+ cp.len_so_far = cpu_to_le16(loc_assoc->offset);
+
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
}
-static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
{
+ struct hci_cp_read_local_amp_assoc cp;
+
+ memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
+ memset(&cp, 0, sizeof(cp));
+
+ cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+
+ set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_read_local_amp_assoc cp;
+ struct amp_mgr *mgr = hcon->amp_mgr;
+
+ cp.phy_handle = hcon->handle;
+ cp.len_so_far = cpu_to_le16(0);
+ cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+
+ set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
+
+ /* Read Local AMP Assoc final link information data */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+/* Write AMP Assoc data fragments; returns true once the last fragment is written */
+static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_write_remote_amp_assoc *cp;
+ struct amp_mgr *mgr = hcon->amp_mgr;
struct amp_ctrl *ctrl;
- struct sk_buff *skb = data;
- struct a2mp_cmd_hdr *hdr;
- struct hci_ev_cmd_status *cs = data;
- struct amp_ctx *cplctx;
- struct a2mp_discover_req dreq;
- struct a2mp_discover_rsp *drsp;
- u16 *efm;
- struct a2mp_getinfo_req greq;
- struct a2mp_getinfo_rsp *grsp;
- struct a2mp_cl *cl;
- struct a2mp_getampassoc_req areq;
- struct a2mp_getampassoc_rsp *arsp;
- struct hci_cp_create_phys_link cp;
- struct hci_cp_write_remote_amp_assoc wcp;
- struct hci_rp_write_remote_amp_assoc *wrp;
- struct hci_ev_channel_selected *cev;
- struct hci_cp_read_local_amp_assoc rcp;
- struct hci_rp_read_local_amp_assoc *rrp;
- struct a2mp_createphyslink_req creq;
- struct a2mp_createphyslink_rsp *crsp;
- struct hci_ev_phys_link_complete *pev;
- struct hci_ev_disconn_phys_link_complete *dev;
- u8 *assoc, *rassoc, *lassoc;
- u16 frag_len;
- u16 rem_len;
- int result = -EAGAIN;
- struct hci_conn *conn;
+ u16 frag_len, len;
- BT_DBG("state %d", ctx->state);
- if (evt_type == AMP_KILLED)
- goto cpl_finished;
+ ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
+ if (!ctrl)
+ return false;
- if (evt_type == AMP_CANCEL) {
- if ((ctx->state < AMP_CPL_CPL_STATUS) ||
- ((ctx->state == AMP_CPL_PL_COMPLETE) &&
- !(ctx->evt_type & AMP_HCI_EVENT)))
- goto cpl_finished;
+ if (!ctrl->assoc_rem_len) {
+ BT_DBG("all fragments are written");
+ ctrl->assoc_rem_len = ctrl->assoc_len;
+ ctrl->assoc_len_so_far = 0;
- cancel_cpl_ctx(ctx, 0x16);
- return 0;
+ amp_ctrl_put(ctrl);
+ return true;
}
- switch (ctx->state) {
- case AMP_CPL_INIT:
- cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
- if (cplctx) {
- BT_DBG("deferred to %p", cplctx);
- cplctx->deferred = ctx;
- break;
- }
- ctx->state = AMP_CPL_DISC_RSP;
- ctx->evt_type = AMP_A2MP_RSP;
- ctx->rsp_ident = next_ident(ctx->mgr);
- dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- dreq.ext_feat = 0;
- send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
- sizeof(dreq), &dreq);
- break;
+ frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
+ len = frag_len + sizeof(*cp);
- case AMP_CPL_DISC_RSP:
- drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
- if (skb->len < (sizeof(*drsp))) {
- result = -EINVAL;
- goto cpl_finished;
- }
-
- efm = (u16 *) skb_pull(skb, sizeof(*drsp));
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
- le16_to_cpu(drsp->ext_feat));
-
- while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
- if (skb->len < sizeof(*efm)) {
- result = -EINVAL;
- goto cpl_finished;
- }
- drsp->ext_feat = *efm;
- BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
- efm = (u16 *) skb_pull(skb, sizeof(*efm));
- }
- cl = (struct a2mp_cl *) efm;
-
- /* find the first remote and local controller with the
- * same type
- */
- greq.id = 0;
- result = -ENODEV;
- while (skb->len >= sizeof(*cl)) {
- if ((cl->id != 0) && (greq.id == 0)) {
- struct hci_dev *hdev;
- hdev = hci_dev_get_type(cl->type);
- if (hdev) {
- struct hci_conn *conn;
- ctx->hdev = hdev;
- ctx->id = hdev->id;
- ctx->d.cpl.remote_id = cl->id;
- conn = hci_conn_hash_lookup_ba(hdev,
- ACL_LINK,
- &ctx->mgr->l2cap_conn->hcon->dst);
- if (conn) {
- BT_DBG("PL_COMPLETE exists %x",
- (int) conn->handle);
- result = 0;
- }
- ctrl = get_create_ctrl(ctx->mgr,
- cl->id);
- if (ctrl) {
- ctrl->type = cl->type;
- ctrl->status = cl->status;
- }
- greq.id = cl->id;
- }
- }
- cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
- }
- if ((!greq.id) || (!result))
- goto cpl_finished;
- ctx->state = AMP_CPL_GETINFO_RSP;
- ctx->evt_type = AMP_A2MP_RSP;
- ctx->rsp_ident = next_ident(ctx->mgr);
- send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
- sizeof(greq), &greq);
- break;
-
- case AMP_CPL_GETINFO_RSP:
- if (skb->len < sizeof(*grsp))
- goto cpl_finished;
- grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
- skb_pull(skb, sizeof(*grsp));
- if (grsp->status)
- goto cpl_finished;
- if (grsp->id != ctx->d.cpl.remote_id)
- goto cpl_finished;
- ctrl = get_ctrl(ctx->mgr, grsp->id);
- if (!ctrl)
- goto cpl_finished;
- ctrl->status = grsp->status;
- ctrl->total_bw = le32_to_cpu(grsp->total_bw);
- ctrl->max_bw = le32_to_cpu(grsp->max_bw);
- ctrl->min_latency = le32_to_cpu(grsp->min_latency);
- ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
- ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
-
- ctx->d.cpl.max_len = ctrl->max_assoc_size;
-
- /* setup up GAA request */
- areq.id = ctx->d.cpl.remote_id;
-
- /* advance context state */
- ctx->state = AMP_CPL_GAA_RSP;
- ctx->evt_type = AMP_A2MP_RSP;
- ctx->rsp_ident = next_ident(ctx->mgr);
- send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
- sizeof(areq), &areq);
- break;
-
- case AMP_CPL_GAA_RSP:
- if (skb->len < sizeof(*arsp))
- goto cpl_finished;
- hdr = (void *) skb->data;
- arsp = (void *) skb_pull(skb, sizeof(*hdr));
- if (arsp->status != 0)
- goto cpl_finished;
-
- /* store away remote assoc */
- assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
- ctx->d.cpl.len_so_far = 0;
- ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
- skb_pull(skb, ctx->d.cpl.rem_len);
- rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
- if (!rassoc)
- goto cpl_finished;
- memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
- ctx->d.cpl.remote_assoc = rassoc;
-
- /* set up CPL command */
- ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
- cp.phy_handle = ctx->d.cpl.phy_handle;
- if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
- &cp.key_len, &cp.type)) {
- result = -EPERM;
- goto cpl_finished;
- }
-
- /* advance context state */
- ctx->state = AMP_CPL_CPL_STATUS;
- ctx->evt_type = AMP_HCI_CMD_STATUS;
- ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
- hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
- break;
-
- case AMP_CPL_CPL_STATUS:
- /* received create physical link command status */
- if (cs->status != 0)
- goto cpl_finished;
- /* send the first assoc fragment */
- wcp.phy_handle = ctx->d.cpl.phy_handle;
- wcp.len_so_far = ctx->d.cpl.len_so_far;
- wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
- frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
- memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
- ctx->state = AMP_CPL_WRA_COMPLETE;
- ctx->evt_type = AMP_HCI_CMD_CMPLT;
- ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
- hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
- break;
-
- case AMP_CPL_WRA_COMPLETE:
- /* received write remote amp assoc command complete event */
- if (skb->len < sizeof(*wrp))
- goto cpl_finished;
- wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
- if (wrp->status != 0)
- goto cpl_finished;
- if (wrp->phy_handle != ctx->d.cpl.phy_handle)
- goto cpl_finished;
-
- /* update progress */
- frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
- ctx->d.cpl.len_so_far += frag_len;
- ctx->d.cpl.rem_len -= frag_len;
- if (ctx->d.cpl.rem_len > 0) {
- /* another assoc fragment to send */
- wcp.phy_handle = ctx->d.cpl.phy_handle;
- wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
- wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
- frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
- memcpy(wcp.frag,
- ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
- frag_len);
- hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
- break;
- }
- /* now wait for channel selected event */
- ctx->state = AMP_CPL_CHANNEL_SELECT;
- ctx->evt_type = AMP_HCI_EVENT;
- ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
- break;
-
- case AMP_CPL_CHANNEL_SELECT:
- /* received channel selection event */
- if (skb->len < sizeof(*cev))
- goto cpl_finished;
- cev = (void *) skb->data;
-/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
- Create Physical Link collision scenario
- if (cev->phy_handle != ctx->d.cpl.phy_handle)
- goto cpl_finished;
-*/
-
- /* request the first local assoc fragment */
- rcp.phy_handle = ctx->d.cpl.phy_handle;
- rcp.len_so_far = 0;
- rcp.max_len = ctx->d.cpl.max_len;
- lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
- if (!lassoc)
- goto cpl_finished;
- ctx->d.cpl.local_assoc = lassoc;
- ctx->d.cpl.len_so_far = 0;
- ctx->state = AMP_CPL_RLA_COMPLETE;
- ctx->evt_type = AMP_HCI_CMD_CMPLT;
- ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
- hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
- break;
-
- case AMP_CPL_RLA_COMPLETE:
- /* received read local amp assoc command complete event */
- if (skb->len < 4)
- goto cpl_finished;
- rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
- if (rrp->status)
- goto cpl_finished;
- if (rrp->phy_handle != ctx->d.cpl.phy_handle)
- goto cpl_finished;
- rem_len = le16_to_cpu(rrp->rem_len);
- skb_pull(skb, 4);
- frag_len = skb->len;
-
- if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
- goto cpl_finished;
-
- /* save this fragment in context */
- lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
- memcpy(lassoc, rrp->frag, frag_len);
- ctx->d.cpl.len_so_far += frag_len;
- rem_len -= frag_len;
- if (rem_len > 0) {
- /* request another local assoc fragment */
- rcp.phy_handle = ctx->d.cpl.phy_handle;
- rcp.len_so_far = ctx->d.cpl.len_so_far;
- rcp.max_len = ctx->d.cpl.max_len;
- hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
- } else {
- creq.local_id = ctx->id;
- creq.remote_id = ctx->d.cpl.remote_id;
- /* wait for A2MP rsp AND phys link complete event */
- ctx->state = AMP_CPL_PL_COMPLETE;
- ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
- ctx->rsp_ident = next_ident(ctx->mgr);
- ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
- send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
- A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
- ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
- }
- break;
-
- case AMP_CPL_PL_COMPLETE:
- if (evt_type == AMP_A2MP_RSP) {
- /* create physical link response received */
- ctx->evt_type &= ~AMP_A2MP_RSP;
- if (skb->len < sizeof(*crsp))
- goto cpl_finished;
- crsp = (void *) skb_pull(skb, sizeof(*hdr));
- if ((crsp->local_id != ctx->d.cpl.remote_id) ||
- (crsp->remote_id != ctx->id) ||
- (crsp->status != 0)) {
- cancel_cpl_ctx(ctx, 0x13);
- break;
- }
-
- /* notify Qualcomm PAL */
- if (ctx->hdev->manufacturer == 0x001d)
- hci_send_cmd(ctx->hdev,
- hci_opcode_pack(0x3f, 0x00), 0, NULL);
- }
- if (evt_type == AMP_HCI_EVENT) {
- ctx->evt_type &= ~AMP_HCI_EVENT;
- /* physical link complete event received */
- if (skb->len < sizeof(*pev))
- goto cpl_finished;
- pev = (void *) skb->data;
- if (pev->phy_handle != ctx->d.cpl.phy_handle)
- break;
- if (pev->status != 0)
- goto cpl_finished;
- }
- if (ctx->evt_type)
- break;
- conn = hci_conn_hash_lookup_handle(ctx->hdev,
- ctx->d.cpl.phy_handle);
- if (!conn)
- goto cpl_finished;
- result = 0;
- BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
- bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
- conn->dst_id = ctx->d.cpl.remote_id;
- conn->out = 1;
- goto cpl_finished;
- break;
-
- case AMP_CPL_PL_CANCEL:
- dev = (void *) skb->data;
- BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
- result = -EISCONN;
- goto cpl_finished;
- break;
-
- default:
- goto cpl_finished;
- break;
+ cp = kzalloc(len, GFP_KERNEL);
+ if (!cp) {
+ amp_ctrl_put(ctrl);
+ return false;
}
- return 0;
-cpl_finished:
- l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
- ctx->sk);
- if (ctx->sk)
- sock_put(ctx->sk);
- if (ctx->hdev)
- hci_dev_put(ctx->hdev);
- kfree(ctx->d.cpl.remote_assoc);
- kfree(ctx->d.cpl.local_assoc);
- return 1;
+ BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
+ hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
+
+ cp->phy_handle = hcon->handle;
+ cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
+ cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
+ memcpy(cp->frag, ctrl->assoc, frag_len);
+
+ ctrl->assoc_len_so_far += frag_len;
+ ctrl->assoc_rem_len -= frag_len;
+
+ amp_ctrl_put(ctrl);
+
+ hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+
+ kfree(cp);
+
+ return false;
}
-static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
+void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
{
- struct a2mp_cmd_hdr *hdr = (void *) skb->data;
- struct a2mp_disconnphyslink_req *req;
- struct a2mp_disconnphyslink_rsp rsp;
+ struct hci_conn *hcon;
+
+ BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon)
+ return;
+
+ /* Send A2MP create phylink rsp when all fragments are written */
+ if (amp_write_rem_assoc_frag(hdev, hcon))
+ a2mp_send_create_phy_link_rsp(hdev, 0);
+}
+
+void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
+{
+ struct hci_conn *hcon;
+
+ BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon)
+ return;
+
+ BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
+
+ amp_write_rem_assoc_frag(hdev, hcon);
+}
+
+void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_create_phy_link cp;
+
+ cp.phy_handle = hcon->handle;
+
+ BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+ hcon->handle);
+
+ if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+ &cp.key_type)) {
+ BT_DBG("Cannot create link key");
+ return;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+}
+
+void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_accept_phy_link cp;
+
+ cp.phy_handle = hcon->handle;
+
+ BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+ hcon->handle);
+
+ if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+ &cp.key_type)) {
+ BT_DBG("Cannot create link key");
+ return;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+}
+
+void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
+{
+ struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
+ struct amp_mgr *mgr = hs_hcon->amp_mgr;
+ struct l2cap_chan *bredr_chan;
+
+ BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
+
+ if (!bredr_hdev || !mgr || !mgr->bredr_chan)
+ return;
+
+ bredr_chan = mgr->bredr_chan;
+
+ l2cap_chan_lock(bredr_chan);
+
+ set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
+ bredr_chan->remote_amp_id = hs_hcon->remote_id;
+ bredr_chan->local_amp_id = hs_hcon->hdev->id;
+ bredr_chan->hs_hcon = hs_hcon;
+ bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
+
+ __l2cap_physical_cfm(bredr_chan, 0);
+
+ l2cap_chan_unlock(bredr_chan);
+
+ hci_dev_put(bredr_hdev);
+}
+
+void amp_create_logical_link(struct l2cap_chan *chan)
+{
+ struct hci_conn *hs_hcon = chan->hs_hcon;
+ struct hci_cp_create_accept_logical_link cp;
struct hci_dev *hdev;
- struct hci_conn *conn;
- struct amp_ctx *aplctx;
- BT_DBG("mgr %p skb %p", mgr, skb);
- if (hdr->len < sizeof(*req))
- return -EINVAL;
- req = (void *) skb_pull(skb, sizeof(*hdr));
- skb_pull(skb, sizeof(*req));
+ BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
- rsp.status = 0;
- BT_DBG("local_id %d remote_id %d",
- (int) rsp.local_id, (int) rsp.remote_id);
- hdev = hci_dev_get(rsp.local_id);
- if (!hdev) {
- rsp.status = 1; /* Invalid Controller ID */
- goto dpl_finished;
- }
- BT_DBG("hdev %p", hdev);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &mgr->l2cap_conn->hcon->dst);
- if (!conn) {
- aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
- if (aplctx) {
- kill_ctx(aplctx);
- rsp.status = 0;
- goto dpl_finished;
- }
- rsp.status = 2; /* No Physical Link exists */
- goto dpl_finished;
- }
- BT_DBG("conn %p", conn);
- hci_disconnect(conn, 0x13);
+ if (!hs_hcon)
+ return;
-dpl_finished:
- send_a2mp_cmd(mgr, hdr->ident,
- A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
- if (hdev)
- hci_dev_put(hdev);
- return 0;
-}
+ hdev = hci_dev_hold(chan->hs_hcon->hdev);
+ if (!hdev)
+ return;
-static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
-{
- struct amp_mgr *mgr = ctx->mgr;
- u8 finished = 0;
+ cp.phy_handle = hs_hcon->handle;
- if (!mgr->connected)
- return 0;
+ cp.tx_flow_spec.id = chan->local_id;
+ cp.tx_flow_spec.stype = chan->local_stype;
+ cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
+ cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
+ cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
- switch (ctx->type) {
- case AMP_GETAMPASSOC:
- finished = getampassoc_handler(ctx, evt_type, data);
- break;
- case AMP_CREATEPHYSLINK:
- finished = createphyslink_handler(ctx, evt_type, data);
- break;
- case AMP_ACCEPTPHYSLINK:
- finished = acceptphyslink_handler(ctx, evt_type, data);
- break;
- }
+ cp.rx_flow_spec.id = chan->remote_id;
+ cp.rx_flow_spec.stype = chan->remote_stype;
+ cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
+ cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
+ cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
+ cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
- if (!finished)
- mod_timer(&(ctx->timer), jiffies +
- msecs_to_jiffies(A2MP_RSP_TIMEOUT));
+ if (hs_hcon->out)
+ hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
+ &cp);
else
- destroy_ctx(ctx);
- return finished;
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
+ &cp);
+
+ hci_dev_put(hdev);
}
-static int cancel_ctx(struct amp_ctx *ctx)
+void amp_disconnect_logical_link(struct hci_chan *hchan)
{
- return execute_ctx(ctx, AMP_CANCEL, 0);
-}
+ struct hci_conn *hcon = hchan->conn;
+ struct hci_cp_disconn_logical_link cp;
-static int kill_ctx(struct amp_ctx *ctx)
-{
- return execute_ctx(ctx, AMP_KILLED, 0);
-}
-
-static void ctx_timeout_worker(struct work_struct *w)
-{
- struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
- struct amp_ctx *ctx = work->ctx;
- kill_ctx(ctx);
- kfree(work);
-}
-
-static void ctx_timeout(unsigned long data)
-{
- struct amp_ctx *ctx = (struct amp_ctx *) data;
- struct amp_work_ctx_timeout *work;
-
- BT_DBG("ctx %p", ctx);
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
- work->ctx = ctx;
- if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
- kfree(work);
- }
-}
-
-static void launch_ctx(struct amp_mgr *mgr)
-{
- struct amp_ctx *ctx = NULL;
-
- BT_DBG("mgr %p", mgr);
- read_lock(&mgr->ctx_list_lock);
- if (!list_empty(&mgr->ctx_list))
- ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
- read_unlock(&mgr->ctx_list_lock);
- BT_DBG("ctx %p", ctx);
- if (ctx)
- execute_ctx(ctx, AMP_INIT, NULL);
-}
-
-static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
-{
- struct amp_ctx *ctx;
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- u16 hdr_len = le16_to_cpu(hdr->len);
-
- /* find context waiting for A2MP rsp with this rsp's identifier */
- BT_DBG("ident %d code %d", hdr->ident, hdr->code);
- ctx = get_ctx_a2mp(mgr, hdr->ident);
- if (ctx) {
- execute_ctx(ctx, AMP_A2MP_RSP, skb);
- } else {
- BT_DBG("context not found");
- skb_pull(skb, sizeof(*hdr));
- if (hdr_len > skb->len)
- hdr_len = skb->len;
- skb_pull(skb, hdr_len);
- }
- return 0;
-}
-
-/* L2CAP-A2MP interface */
-
-static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
-{
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- int len;
- int err = 0;
- struct amp_mgr *mgr;
-
- mgr = get_amp_mgr_sk(sk);
- if (!mgr)
- goto a2mp_finished;
-
- len = skb->len;
- while (len >= sizeof(*hdr)) {
- struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
- u16 clen = le16_to_cpu(hdr->len);
-
- BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
- if (clen > len || !hdr->ident) {
- err = -EINVAL;
- break;
- }
- switch (hdr->code) {
- case A2MP_COMMAND_REJ:
- command_rej(mgr, skb);
- break;
- case A2MP_DISCOVER_REQ:
- err = discover_req(mgr, skb);
- break;
- case A2MP_CHANGE_NOTIFY:
- err = change_notify(mgr, skb);
- break;
- case A2MP_GETINFO_REQ:
- err = getinfo_req(mgr, skb);
- break;
- case A2MP_GETAMPASSOC_REQ:
- err = getampassoc_req(mgr, skb);
- break;
- case A2MP_CREATEPHYSLINK_REQ:
- err = createphyslink_req(mgr, skb);
- break;
- case A2MP_DISCONNPHYSLINK_REQ:
- err = disconnphyslink_req(mgr, skb);
- break;
- case A2MP_CHANGE_RSP:
- case A2MP_DISCOVER_RSP:
- case A2MP_GETINFO_RSP:
- case A2MP_GETAMPASSOC_RSP:
- case A2MP_CREATEPHYSLINK_RSP:
- case A2MP_DISCONNPHYSLINK_RSP:
- err = a2mp_rsp(mgr, skb);
- break;
- default:
- BT_ERR("Unknown A2MP signaling command 0x%2.2x",
- hdr->code);
- skb_pull(skb, sizeof(*hdr));
- err = -EINVAL;
- break;
- }
- len = skb->len;
- }
-
-a2mp_finished:
- if (err && mgr) {
- struct a2mp_cmd_rej rej;
- rej.reason = cpu_to_le16(0);
- send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
- sizeof(rej), &rej);
- }
-}
-
-/* L2CAP-A2MP interface */
-
-static int send_a2mp(struct socket *sock, u8 *data, int len)
-{
- struct kvec iv = { data, len };
- struct msghdr msg;
-
- memset(&msg, 0, sizeof(msg));
-
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
-}
-
-static void data_ready_worker(struct work_struct *w)
-{
- struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
- struct sock *sk = work->sk;
- struct sk_buff *skb;
-
- /* skb_dequeue() is thread-safe */
- while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
- a2mp_receive(sk, skb);
- kfree_skb(skb);
- }
- sock_put(work->sk);
- kfree(work);
-}
-
-static void data_ready(struct sock *sk, int bytes)
-{
- struct amp_work_data_ready *work;
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, data_ready_worker);
- sock_hold(sk);
- work->sk = sk;
- work->bytes = bytes;
- if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
- kfree(work);
- sock_put(sk);
- }
- }
-}
-
-static void state_change_worker(struct work_struct *w)
-{
- struct amp_work_state_change *work = (struct amp_work_state_change *) w;
- struct amp_mgr *mgr;
- switch (work->sk->sk_state) {
- case BT_CONNECTED:
- /* socket is up */
- BT_DBG("CONNECTED");
- mgr = get_amp_mgr_sk(work->sk);
- if (mgr) {
- mgr->connected = 1;
- if (mgr->skb) {
- l2cap_recv_deferred_frame(work->sk, mgr->skb);
- mgr->skb = NULL;
- }
- launch_ctx(mgr);
- }
- break;
-
- case BT_CLOSED:
- /* connection is gone */
- BT_DBG("CLOSED");
- mgr = get_amp_mgr_sk(work->sk);
- if (mgr) {
- if (!sock_flag(work->sk, SOCK_DEAD))
- sock_release(mgr->a2mp_sock);
- mgr->a2mp_sock = NULL;
- remove_amp_mgr(mgr);
- }
- break;
-
- default:
- /* something else happened */
- break;
- }
- sock_put(work->sk);
- kfree(work);
-}
-
-static void state_change(struct sock *sk)
-{
- struct amp_work_state_change *work;
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, state_change_worker);
- sock_hold(sk);
- work->sk = sk;
- if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
- kfree(work);
- sock_put(sk);
- }
- }
-}
-
-static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
-{
- int err;
- struct socket *sock;
- struct sockaddr_l2 addr;
- struct sock *sk;
- struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
- L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
- L2CAP_MODE_ERTM, 1, 0xFF, 1};
-
-
- err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
- BTPROTO_L2CAP, &sock);
-
- if (err) {
- BT_ERR("sock_create_kern failed %d", err);
- return NULL;
- }
-
- sk = sock->sk;
- sk->sk_data_ready = data_ready;
- sk->sk_state_change = state_change;
-
- memset(&addr, 0, sizeof(addr));
- bacpy(&addr.l2_bdaddr, src);
- addr.l2_family = AF_BLUETOOTH;
- addr.l2_cid = L2CAP_CID_A2MP;
- err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
- if (err) {
- BT_ERR("kernel_bind failed %d", err);
- sock_release(sock);
- return NULL;
- }
-
- l2cap_fixed_channel_config(sk, &opts);
-
- memset(&addr, 0, sizeof(addr));
- bacpy(&addr.l2_bdaddr, dst);
- addr.l2_family = AF_BLUETOOTH;
- addr.l2_cid = L2CAP_CID_A2MP;
- err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
- O_NONBLOCK);
- if ((err == 0) || (err == -EINPROGRESS))
- return sock;
- else {
- BT_ERR("kernel_connect failed %d", err);
- sock_release(sock);
- return NULL;
- }
-}
-
-static void conn_ind_worker(struct work_struct *w)
-{
- struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
- struct hci_conn *hcon = work->hcon;
- struct sk_buff *skb = work->skb;
- struct amp_mgr *mgr;
-
- mgr = get_create_amp_mgr(hcon, skb);
- BT_DBG("mgr %p", mgr);
- hci_conn_put(hcon);
- kfree(work);
-}
-
-static void create_physical_worker(struct work_struct *w)
-{
- struct amp_work_create_physical *work =
- (struct amp_work_create_physical *) w;
-
- create_physical(work->conn, work->sk);
- sock_put(work->sk);
- kfree(work);
-}
-
-static void accept_physical_worker(struct work_struct *w)
-{
- struct amp_work_accept_physical *work =
- (struct amp_work_accept_physical *) w;
-
- accept_physical(work->conn, work->id, work->sk);
- sock_put(work->sk);
- kfree(work);
-}
-
-/* L2CAP Fixed Channel interface */
-
-void amp_conn_ind(struct hci_conn *hcon, struct sk_buff *skb)
-{
- struct amp_work_conn_ind *work;
- BT_DBG("hcon %p, skb %p", hcon, skb);
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, conn_ind_worker);
- hci_conn_hold(hcon);
- work->hcon = hcon;
- work->skb = skb;
- if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
- hci_conn_put(hcon);
- kfree(work);
- }
- }
-}
-
-/* L2CAP Physical Link interface */
-
-void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
-{
- struct amp_work_create_physical *work;
- BT_DBG("conn %p", conn);
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, create_physical_worker);
- work->conn = conn;
- work->sk = sk;
- sock_hold(sk);
- if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
- sock_put(sk);
- kfree(work);
- }
- }
-}
-
-void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
-{
- struct amp_work_accept_physical *work;
- BT_DBG("conn %p", conn);
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, accept_physical_worker);
- work->conn = conn;
- work->sk = sk;
- work->id = id;
- sock_hold(sk);
- if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
- sock_put(sk);
- kfree(work);
- }
- }
-}
-
-/* HCI interface */
-
-static void amp_cmd_cmplt_worker(struct work_struct *w)
-{
- struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
- struct hci_dev *hdev = work->hdev;
- u16 opcode = work->opcode;
- struct sk_buff *skb = work->skb;
- struct amp_ctx *ctx;
-
- ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
- if (ctx)
- execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
- kfree_skb(skb);
- kfree(w);
-}
-
-static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
- struct sk_buff *skb)
-{
- struct amp_work_cmd_cmplt *work;
- struct sk_buff *skbc;
- BT_DBG("hdev %p opcode 0x%x skb %p len %d",
- hdev, opcode, skb, skb->len);
- skbc = skb_clone(skb, GFP_ATOMIC);
- if (!skbc)
+ if (hcon->state != BT_CONNECTED) {
+ BT_DBG("hchan %p not connected", hchan);
return;
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
- work->hdev = hdev;
- work->opcode = opcode;
- work->skb = skbc;
- if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
- kfree(work);
}
+
+ cp.log_handle = cpu_to_le16(hchan->handle);
+ hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
}
-static void amp_cmd_status_worker(struct work_struct *w)
+void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
{
- struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
- struct hci_dev *hdev = work->hdev;
- u16 opcode = work->opcode;
- u8 status = work->status;
- struct amp_ctx *ctx;
+ BT_DBG("hchan %p", hchan);
- ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
- if (ctx)
- execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
- kfree(w);
-}
-
-static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
-{
- struct amp_work_cmd_status *work;
- BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
- work->hdev = hdev;
- work->opcode = opcode;
- work->status = status;
- if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
- kfree(work);
- }
-}
-
-static void amp_event_worker(struct work_struct *w)
-{
- struct amp_work_event *work = (struct amp_work_event *) w;
- struct hci_dev *hdev = work->hdev;
- u8 event = work->event;
- struct sk_buff *skb = work->skb;
- struct amp_ctx *ctx;
-
- if (event == HCI_EV_AMP_STATUS_CHANGE) {
- struct hci_ev_amp_status_change *ev;
- if (skb->len < sizeof(*ev))
- goto amp_event_finished;
- ev = (void *) skb->data;
- if (ev->status != 0)
- goto amp_event_finished;
- if (ev->amp_status == hdev->amp_status)
- goto amp_event_finished;
- hdev->amp_status = ev->amp_status;
- send_a2mp_change_notify();
- goto amp_event_finished;
- }
- ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
- if (ctx)
- execute_ctx(ctx, AMP_HCI_EVENT, skb);
-
-amp_event_finished:
- kfree_skb(skb);
- kfree(w);
-}
-
-static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
-{
- struct amp_work_event *work;
- struct sk_buff *skbc;
- BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
- skbc = skb_clone(skb, GFP_ATOMIC);
- if (!skbc)
- return;
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work, amp_event_worker);
- work->hdev = hdev;
- work->event = event;
- work->skb = skbc;
- if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
- kfree(work);
- }
-}
-
-static void amp_dev_event_worker(struct work_struct *w)
-{
- send_a2mp_change_notify();
- kfree(w);
-}
-
-static int amp_dev_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct hci_dev *hdev = (struct hci_dev *) ptr;
- struct amp_work_event *work;
-
- if (hdev->amp_type == HCI_BREDR)
- return NOTIFY_DONE;
-
- switch (event) {
- case HCI_DEV_UNREG:
- case HCI_DEV_REG:
- case HCI_DEV_UP:
- case HCI_DEV_DOWN:
- BT_DBG("hdev %p event %ld", hdev, event);
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK((struct work_struct *) work,
- amp_dev_event_worker);
- if (queue_work(amp_workqueue,
- (struct work_struct *) work) == 0)
- kfree(work);
- }
- }
- return NOTIFY_DONE;
-}
-
-
-/* L2CAP module init continued */
-
-static struct notifier_block amp_notifier = {
- .notifier_call = amp_dev_event
-};
-
-static struct amp_mgr_cb hci_amp = {
- .amp_cmd_complete_event = amp_cmd_cmplt_evt,
- .amp_cmd_status_event = amp_cmd_status_evt,
- .amp_event = amp_evt
-};
-
-int amp_init(void)
-{
- hci_register_amp(&hci_amp);
- hci_register_notifier(&amp_notifier);
- amp_next_handle = 1;
- amp_workqueue = create_singlethread_workqueue("a2mp");
- if (!amp_workqueue)
- return -EPERM;
- return 0;
-}
-
-void amp_exit(void)
-{
- hci_unregister_amp(&hci_amp);
- hci_unregister_notifier(&amp_notifier);
- flush_workqueue(amp_workqueue);
- destroy_workqueue(amp_workqueue);
+ hci_chan_del(hchan);
}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index f504921..e430b1a 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,39 +26,20 @@
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
#include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
#include <linux/file.h>
-
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
#include "bnep.h"
#define VERSION "1.3"
-static bool compress_src = 1;
-static bool compress_dst = 1;
+static bool compress_src = true;
+static bool compress_dst = true;
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
@@ -66,31 +47,24 @@
static struct bnep_session *__bnep_get_session(u8 *dst)
{
struct bnep_session *s;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &bnep_session_list) {
- s = list_entry(p, struct bnep_session, list);
- if (!compare_ether_addr(dst, s->eh.h_source))
+ list_for_each_entry(s, &bnep_session_list, list)
+ if (ether_addr_equal(dst, s->eh.h_source))
return s;
- }
+
return NULL;
}
static void __bnep_link_session(struct bnep_session *s)
{
- /* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the reference to this module.
- */
- __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
- module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -207,8 +181,7 @@
a2 = data;
data += ETH_ALEN;
- BT_DBG("mc filter %s -> %s",
- batostr((void *) a1), batostr((void *) a2));
+ BT_DBG("mc filter %pMR -> %pMR", a1, a2);
/* Iterate from a1 to a2 */
set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
@@ -314,7 +287,7 @@
ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
};
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
@@ -348,7 +321,7 @@
}
/* Strip 802.1p header */
- if (ntohs(s->eh.h_proto) == 0x8100) {
+ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
@@ -412,7 +385,7 @@
BNEP_COMPRESSED
};
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
@@ -430,10 +403,10 @@
iv[il++] = (struct kvec) { &type, 1 };
len++;
- if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source))
+ if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source))
type |= 0x01;
- if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest))
+ if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest))
type |= 0x02;
if (type)
@@ -510,7 +483,7 @@
schedule();
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@@ -531,6 +504,7 @@
up_write(&bnep_session_sem);
free_netdev(dev);
+ module_put_and_exit(0);
return 0;
}
@@ -617,9 +591,11 @@
__bnep_link_session(s);
+ __module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
+ module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
@@ -668,17 +644,14 @@
int bnep_get_connlist(struct bnep_connlist_req *req)
{
- struct list_head *p;
+ struct bnep_session *s;
int err = 0, n = 0;
down_read(&bnep_session_sem);
- list_for_each(p, &bnep_session_list) {
- struct bnep_session *s;
+ list_for_each_entry(s, &bnep_session_list, list) {
struct bnep_conninfo ci;
- s = list_entry(p, struct bnep_session, list);
-
__bnep_copy_ci(&ci, s);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 155ff74..4b488ec 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,17 +25,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -129,7 +119,7 @@
}
#ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
struct ethhdr *eh = (void *) skb->data;
@@ -141,12 +131,12 @@
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
u16 proto = ntohs(eh->h_proto);
- if (proto >= 1536)
+ if (proto >= ETH_P_802_3_MIN)
return proto;
if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
@@ -155,7 +145,7 @@
return ETH_P_802_2;
}
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
{
u16 proto = bnep_net_eth_proto(skb);
struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 17800b1..5f05129 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,28 +24,15 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
#include "bnep.h"
+static struct bt_sock_list bnep_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
+};
+
static int bnep_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -55,6 +42,8 @@
if (!sk)
return 0;
+ bt_sock_unlink(&bnep_sk_list, sk);
+
sock_orphan(sk);
sock_put(sk);
return 0;
@@ -75,7 +64,7 @@
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -101,7 +90,7 @@
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
@@ -143,10 +132,10 @@
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -157,7 +146,7 @@
err = bnep_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
@@ -221,6 +210,7 @@
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
+ bt_sock_link(&bnep_sk_list, sk);
return 0;
}
@@ -239,21 +229,30 @@
return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("Can't register BNEP socket");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create BNEP proc file");
+ bt_sock_unregister(BTPROTO_BNEP);
+ goto error;
+ }
+
+ BT_INFO("BNEP socket layer initialized");
return 0;
error:
- BT_ERR("Can't register BNEP socket");
proto_unregister(&bnep_proto);
return err;
}
void __exit bnep_sock_cleanup(void)
{
- if (bt_sock_unregister(BTPROTO_BNEP) < 0)
- BT_ERR("Can't unregister BNEP socket");
-
+ bt_procfs_cleanup(&init_net, "bnep");
+ bt_sock_unregister(BTPROTO_BNEP);
proto_unregister(&bnep_proto);
}
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 744233c..cd75e4d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -20,7 +20,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/types.h>
@@ -326,7 +326,7 @@
{
struct capi_ctr *ctrl = &session->ctrl;
struct cmtp_application *application;
- __u16 cmd, appl;
+ __u16 appl;
__u32 contr;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,7 +344,6 @@
return;
}
- cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
appl = CAPIMSG_APPID(skb->data);
contr = CAPIMSG_CONTROL(skb->data);
@@ -387,7 +386,8 @@
capi_ctr_down(ctrl);
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
}
static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
@@ -539,7 +539,7 @@
static int cmtp_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, cmtp_proc_show, PDE(inode)->data);
+ return single_open(file, cmtp_proc_show, PDE_DATA(inode));
}
static const struct file_operations cmtp_proc_fops = {
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index db43b54..c32638d 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -81,6 +81,7 @@
char name[BTNAMSIZ];
+ atomic_t terminate;
struct task_struct *task;
wait_queue_head_t wait;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index bff02ad..e0a6ebf 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,28 +53,24 @@
static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
{
struct cmtp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &cmtp_session_list) {
- session = list_entry(p, struct cmtp_session, list);
+ list_for_each_entry(session, &cmtp_session_list, list)
if (!bacmp(bdaddr, &session->bdaddr))
return session;
- }
+
return NULL;
}
static void __cmtp_link_session(struct cmtp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -292,9 +288,11 @@
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (!kthread_should_stop()) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&session->terminate))
+ break;
if (sk->sk_state != BT_CONNECTED)
break;
@@ -310,7 +308,7 @@
schedule();
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
@@ -325,6 +323,7 @@
up_write(&cmtp_session_sem);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
@@ -349,11 +348,12 @@
bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
- session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu);
+ session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
+ l2cap_pi(sock->sk)->chan->imtu);
BT_DBG("mtu %d", session->mtu);
- sprintf(session->name, "%s", batostr(&bt_sk(sock->sk)->dst));
+ sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst);
session->sock = sock;
session->state = BT_CONFIG;
@@ -373,25 +373,28 @@
__cmtp_link_session(session);
+ __module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
if (IS_ERR(session->task)) {
+ module_put(THIS_MODULE);
err = PTR_ERR(session->task);
goto unlink;
}
if (!(session->flags & (1 << CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
- if (err < 0)
- goto detach;
+ if (err < 0) {
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
+ up_write(&cmtp_session_sem);
+ return err;
+ }
}
up_write(&cmtp_session_sem);
return 0;
-detach:
- cmtp_detach_device(session);
-
unlink:
__cmtp_unlink_session(session);
@@ -416,7 +419,8 @@
skb_queue_purge(&session->transmit);
/* Stop session thread */
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
} else
err = -ENOENT;
@@ -426,19 +430,16 @@
int cmtp_get_connlist(struct cmtp_connlist_req *req)
{
- struct list_head *p;
+ struct cmtp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&cmtp_session_sem);
- list_for_each(p, &cmtp_session_list) {
- struct cmtp_session *session;
+ list_for_each_entry(session, &cmtp_session_list, list) {
struct cmtp_conninfo ci;
- session = list_entry(p, struct cmtp_session, list);
-
__cmtp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 3f2dd5c..d82787d 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -20,7 +20,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/capability.h>
@@ -39,10 +39,13 @@
#include <linux/isdn/capilli.h>
-#include <asm/system.h>
#include "cmtp.h"
+static struct bt_sock_list cmtp_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
+};
+
static int cmtp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -52,6 +55,8 @@
if (!sk)
return 0;
+ bt_sock_unlink(&cmtp_sk_list, sk);
+
sock_orphan(sk);
sock_put(sk);
@@ -73,7 +78,7 @@
switch (cmd) {
case CMTPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -98,7 +103,7 @@
case CMTPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
@@ -137,10 +142,10 @@
{
if (cmd == CMTPGETCONNLIST) {
struct cmtp_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -151,7 +156,7 @@
err = cmtp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
@@ -215,6 +220,8 @@
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
+ bt_sock_link(&cmtp_sk_list, sk);
+
return 0;
}
@@ -233,21 +240,30 @@
return err;
err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("Can't register CMTP socket");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "cmtp", &cmtp_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create CMTP proc file");
+ bt_sock_unregister(BTPROTO_CMTP);
+ goto error;
+ }
+
+ BT_INFO("CMTP socket layer initialized");
return 0;
error:
- BT_ERR("Can't register CMTP socket");
proto_unregister(&cmtp_proto);
return err;
}
void cmtp_cleanup_sockets(void)
{
- if (bt_sock_unregister(BTPROTO_CMTP) < 0)
- BT_ERR("Can't unregister CMTP socket");
-
+ bt_procfs_cleanup(&init_net, "cmtp");
+ bt_sock_unregister(BTPROTO_CMTP);
proto_unregister(&cmtp_proto);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 44b36fe..20d4a5f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,7 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, The Linux Foundation. All rights reserved.
- Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -25,205 +24,52 @@
/* Bluetooth HCI connection handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/smp.h>
-struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type,
- bdaddr_t *dst, __u8 sec_level, __u8 auth_type,
- struct bt_le_params *le_params)
+static void hci_le_create_connection(struct hci_conn *conn)
{
- struct hci_conn *le, *le_wlist_conn;
+ struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_create_conn cp;
- struct adv_entry *entry;
- struct link_key *key;
- BT_DBG("%p", hdev);
-
- le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le) {
- le_wlist_conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
- BDADDR_ANY);
- if (!le_wlist_conn) {
- hci_conn_hold(le);
- return le;
- } else {
- BT_DBG("remove wlist conn");
- le->out = 1;
- le->link_mode |= HCI_LM_MASTER;
- le->sec_level = BT_SECURITY_LOW;
- le->type = LE_LINK;
- hci_proto_connect_cfm(le, 0);
- hci_conn_del(le_wlist_conn);
- return le;
- }
- }
-
- key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK);
- if (!key) {
- entry = hci_find_adv_entry(hdev, dst);
- if (entry)
- le = hci_le_conn_add(hdev, dst,
- entry->bdaddr_type);
- else
- le = hci_le_conn_add(hdev, dst, 0);
- } else {
- le = hci_le_conn_add(hdev, dst, key->addr_type);
- }
-
- if (!le)
- return ERR_PTR(-ENOMEM);
-
- hci_conn_hold(le);
-
- le->state = BT_CONNECT;
- le->out = 1;
- le->link_mode |= HCI_LM_MASTER;
- le->sec_level = BT_SECURITY_LOW;
- le->type = LE_LINK;
+ conn->state = BT_CONNECT;
+ conn->out = true;
+ conn->link_mode |= HCI_LM_MASTER;
+ conn->sec_level = BT_SECURITY_LOW;
memset(&cp, 0, sizeof(cp));
- if (l2cap_sock_le_params_valid(le_params)) {
- cp.supervision_timeout =
- cpu_to_le16(le_params->supervision_timeout);
- cp.scan_interval = cpu_to_le16(le_params->scan_interval);
- cp.scan_window = cpu_to_le16(le_params->scan_window);
- cp.conn_interval_min = cpu_to_le16(le_params->interval_min);
- cp.conn_interval_max = cpu_to_le16(le_params->interval_max);
- cp.conn_latency = cpu_to_le16(le_params->latency);
- cp.min_ce_len = cpu_to_le16(le_params->min_ce_len);
- cp.max_ce_len = cpu_to_le16(le_params->max_ce_len);
- le->conn_timeout = le_params->conn_timeout;
- } else {
- cp.supervision_timeout = cpu_to_le16(BT_LE_SUP_TO_DEFAULT);
- cp.scan_interval = cpu_to_le16(BT_LE_SCAN_INTERVAL_DEF);
- cp.scan_window = cpu_to_le16(BT_LE_SCAN_WINDOW_DEF);
- cp.conn_interval_min = cpu_to_le16(BT_LE_CONN_INTERVAL_MIN_DEF);
- cp.conn_interval_max = cpu_to_le16(BT_LE_CONN_INTERVAL_MAX_DEF);
- cp.conn_latency = cpu_to_le16(BT_LE_LATENCY_DEF);
- le->conn_timeout = 5;
- }
- if (!bacmp(&le->dst, BDADDR_ANY)) {
- cp.filter_policy = 0x01;
- le->conn_timeout = 0;
- } else {
- bacpy(&cp.peer_addr, &le->dst);
- cp.peer_addr_type = le->dst_type;
- }
+ cp.scan_interval = __constant_cpu_to_le16(0x0060);
+ cp.scan_window = __constant_cpu_to_le16(0x0030);
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.peer_addr_type = conn->dst_type;
+ cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
+ cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
+ cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0000);
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
-
- return le;
}
-EXPORT_SYMBOL(hci_le_connect);
-static void hci_le_connect_cancel(struct hci_conn *conn)
+static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
-void hci_le_cancel_create_connect(struct hci_dev *hdev, bdaddr_t *dst)
-{
- struct hci_conn *le;
-
- BT_DBG("%p", hdev);
-
- le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le) {
- BT_DBG("send hci connect cancel");
- hci_le_connect_cancel(le);
- hci_conn_del(le);
- }
-}
-EXPORT_SYMBOL(hci_le_cancel_create_connect);
-
-void hci_le_add_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst)
-{
- struct hci_cp_le_add_dev_white_list cp;
- struct adv_entry *entry;
- struct link_key *key;
-
- BT_DBG("%p", hdev);
-
- memset(&cp, 0, sizeof(cp));
- bacpy(&cp.addr, dst);
-
- key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK);
- if (!key) {
- entry = hci_find_adv_entry(hdev, dst);
- if (entry)
- cp.addr_type = entry->bdaddr_type;
- else
- cp.addr_type = 0x00;
- } else {
- cp.addr_type = key->addr_type;
- }
-
- hci_send_cmd(hdev, HCI_OP_LE_ADD_DEV_WHITE_LIST, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_add_dev_white_list);
-
-void hci_le_remove_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst)
-{
- struct hci_cp_le_remove_dev_white_list cp;
- struct adv_entry *entry;
- struct link_key *key;
-
- BT_DBG("%p", hdev);
-
- memset(&cp, 0, sizeof(cp));
- bacpy(&cp.addr, dst);
-
- key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK);
- if (!key) {
- entry = hci_find_adv_entry(hdev, dst);
- if (entry)
- cp.addr_type = entry->bdaddr_type;
- else
- cp.addr_type = 0x00;
- } else {
- cp.addr_type = key->addr_type;
- }
-
- hci_send_cmd(hdev, HCI_OP_LE_REMOVE_DEV_WHITE_LIST, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_remove_dev_white_list);
-
-static inline bool is_role_switch_possible(struct hci_dev *hdev)
-{
- if (hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECTED))
- return false;
- return true;
-}
-
-void hci_acl_connect(struct hci_conn *conn)
+static void hci_acl_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct inquiry_entry *ie;
struct hci_cp_create_conn cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->link_mode = HCI_LM_MASTER;
@@ -241,16 +87,16 @@
cp.pscan_rep_mode = ie->data.pscan_rep_mode;
cp.pscan_mode = ie->data.pscan_mode;
cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
+ __constant_cpu_to_le16(0x8000);
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
- conn->ssp_mode = ie->data.ssp_mode;
+ if (ie->data.ssp_mode > 0)
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)
- && is_role_switch_possible(hdev))
+ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
cp.role_switch = 0x01;
else
cp.role_switch = 0x00;
@@ -258,48 +104,65 @@
hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
-static void hci_acl_connect_cancel(struct hci_conn *conn)
+static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
struct hci_cp_create_conn_cancel cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
- if (conn->hdev->hci_ver < 2)
+ if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
-void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
+static void hci_reject_sco(struct hci_conn *conn)
{
- BT_DBG("%p", conn);
+ struct hci_cp_reject_sync_conn_req cp;
+
+ cp.reason = HCI_ERROR_REMOTE_USER_TERM;
+ bacpy(&cp.bdaddr, &conn->dst);
+
+ hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
+}
+
+void hci_disconnect(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_cp_disconnect cp;
+
+ BT_DBG("hcon %p", conn);
conn->state = BT_DISCONN;
- if (conn->hdev->dev_type == HCI_BREDR) {
- struct hci_cp_disconnect cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.reason = reason;
- hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
- } else {
- struct hci_cp_disconn_phys_link cp;
- cp.phy_handle = (u8) conn->handle;
- cp.reason = reason;
- hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHYS_LINK,
- sizeof(cp), &cp);
- }
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
-void hci_add_sco(struct hci_conn *conn, __u16 handle)
+static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_cp_disconn_phy_link cp;
+
+ BT_DBG("hcon %p", conn);
+
+ conn->state = BT_DISCONN;
+
+ cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
+ sizeof(cp), &cp);
+}
+
+static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_add_sco cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->attempt++;
@@ -314,37 +177,27 @@
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->attempt++;
cp.handle = cpu_to_le16(handle);
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- if (conn->hdev->is_wbs) {
- /* Transparent Data */
- uint16_t voice_setting = hdev->voice_setting | ACF_TRANS;
- cp.max_latency = cpu_to_le16(0x000D);
- cp.pkt_type = cpu_to_le16(ESCO_WBS);
- cp.voice_setting = cpu_to_le16(voice_setting);
- /* Retransmission Effort */
- cp.retrans_effort = RE_LINK_QUALITY;
- } else {
- cp.max_latency = cpu_to_le16(0x000A);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = RE_POWER_CONSUMP;
- }
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
+ cp.voice_setting = cpu_to_le16(hdev->voice_setting);
+ cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
- u16 latency, u16 to_multiplier)
+ u16 latency, u16 to_multiplier)
{
struct hci_cp_le_conn_update cp;
struct hci_dev *hdev = conn->hdev;
@@ -356,32 +209,19 @@
cp.conn_interval_max = cpu_to_le16(max);
cp.conn_latency = cpu_to_le16(latency);
cp.supervision_timeout = cpu_to_le16(to_multiplier);
- cp.min_ce_len = cpu_to_le16(0x0001);
- cp.max_ce_len = cpu_to_le16(0x0001);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0001);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0001);
hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
-EXPORT_SYMBOL(hci_le_conn_update);
-
-void hci_read_rssi(struct hci_conn *conn)
-{
- struct hci_cp_read_rssi cp;
- struct hci_dev *hdev = conn->hdev;
-
- memset(&cp, 0, sizeof(cp));
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_READ_RSSI, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_read_rssi);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
- __u8 ltk[16])
+ __u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_start_enc cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
memset(&cp, 0, sizeof(cp));
@@ -392,48 +232,17 @@
hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
-EXPORT_SYMBOL(hci_le_start_enc);
-
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
- memcpy(cp.ltk, ltk, sizeof(cp.ltk));
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_ltk_reply);
-
-void hci_le_ltk_neg_reply(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_neg_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
-}
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
struct hci_conn *sco = conn->link;
- BT_DBG("%p", conn);
-
if (!sco)
return;
+ BT_DBG("hcon %p", conn);
+
if (!status) {
if (lmp_esco_capable(conn->hdev))
hci_setup_sync(sco, conn->handle);
@@ -445,79 +254,107 @@
}
}
-static void hci_conn_timeout(unsigned long arg)
+static void hci_conn_disconnect(struct hci_conn *conn)
{
- struct hci_conn *conn = (void *) arg;
- struct hci_dev *hdev = conn->hdev;
- __u8 reason;
+ __u8 reason = hci_proto_disconn_ind(conn);
- BT_DBG("conn %p state %d", conn, conn->state);
+ switch (conn->type) {
+ case AMP_LINK:
+ hci_amp_disconn(conn, reason);
+ break;
+ default:
+ hci_disconnect(conn, reason);
+ break;
+ }
+}
- hci_dev_lock(hdev);
+static void hci_conn_timeout(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ disc_work.work);
+
+ BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
+
+ if (atomic_read(&conn->refcnt))
+ return;
switch (conn->state) {
case BT_CONNECT:
case BT_CONNECT2:
if (conn->out) {
if (conn->type == ACL_LINK)
- hci_acl_connect_cancel(conn);
+ hci_acl_create_connection_cancel(conn);
else if (conn->type == LE_LINK)
- hci_le_connect_cancel(conn);
+ hci_le_create_connection_cancel(conn);
+ } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+ hci_reject_sco(conn);
}
break;
case BT_CONFIG:
case BT_CONNECTED:
- if (!atomic_read(&conn->refcnt)) {
- reason = hci_proto_disconn_ind(conn);
- hci_acl_disconn(conn, reason);
- }
+ hci_conn_disconnect(conn);
break;
default:
- if (!atomic_read(&conn->refcnt))
- conn->state = BT_CLOSED;
+ conn->state = BT_CLOSED;
break;
}
+}
- hci_dev_unlock(hdev);
+/* Enter sniff mode */
+static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
+
+ if (test_bit(HCI_RAW, &hdev->flags))
+ return;
+
+ if (conn->type == LE_LINK)
+ return;
+
+ if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+ return;
+
+ if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+ return;
+
+ if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+ struct hci_cp_sniff_subrate cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_latency = __constant_cpu_to_le16(0);
+ cp.min_remote_timeout = __constant_cpu_to_le16(0);
+ cp.min_local_timeout = __constant_cpu_to_le16(0);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+ }
+
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+ struct hci_cp_sniff_mode cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+ cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+ cp.attempt = __constant_cpu_to_le16(4);
+ cp.timeout = __constant_cpu_to_le16(1);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+ }
}
static void hci_conn_idle(unsigned long arg)
{
struct hci_conn *conn = (void *) arg;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
hci_conn_enter_sniff_mode(conn);
}
-static void hci_conn_rssi_update(struct work_struct *work)
+static void hci_conn_auto_accept(unsigned long arg)
{
- struct delayed_work *delayed =
- container_of(work, struct delayed_work, work);
- struct hci_conn *conn =
- container_of(delayed, struct hci_conn, rssi_update_work);
+ struct hci_conn *conn = (void *) arg;
+ struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
-
- hci_read_rssi(conn);
-}
-
-static void encryption_disabled_timeout(unsigned long userdata)
-{
- struct hci_conn *conn = (struct hci_conn *)userdata;
- BT_INFO("conn %p Grace Prd Exp ", conn);
-
- hci_encrypt_cfm(conn, 0, 0);
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- struct hci_cp_set_conn_encrypt cp;
- BT_INFO("HCI_CONN_ENCRYPT_PEND is set");
- cp.handle = cpu_to_le16(conn->handle);
- cp.encrypt = 1;
- hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- }
-
+ hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
+ &conn->dst);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
@@ -525,9 +362,9 @@
{
struct hci_conn *conn;
- BT_DBG("%s dst %s", hdev->name, batostr(dst));
+ BT_DBG("%s dst %pMR", hdev->name, dst);
- conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
+ conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
if (!conn)
return NULL;
@@ -539,17 +376,14 @@
conn->auth_type = HCI_AT_GENERAL_BONDING;
conn->io_capability = hdev->io_capability;
conn->remote_auth = 0xff;
+ conn->key_type = 0xff;
- conn->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- conn->conn_valid = true;
- spin_lock_init(&conn->lock);
- wake_lock_init(&conn->idle_lock, WAKE_LOCK_SUSPEND, "bt_idle");
switch (type) {
case ACL_LINK:
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
- conn->link_policy = hdev->link_policy;
break;
case SCO_LINK:
if (!pkt_type)
@@ -573,40 +407,23 @@
skb_queue_head_init(&conn->data_q);
- setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
+ INIT_LIST_HEAD(&conn->chan_list);
+
+ INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
- INIT_DELAYED_WORK(&conn->rssi_update_work, hci_conn_rssi_update);
- setup_timer(&conn->encrypt_pause_timer, encryption_disabled_timeout,
- (unsigned long)conn);
+ setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
+ (unsigned long) conn);
atomic_set(&conn->refcnt, 0);
hci_dev_hold(hdev);
- tasklet_disable(&hdev->tx_task);
-
hci_conn_hash_add(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
- atomic_set(&conn->devref, 0);
-
hci_conn_init_sysfs(conn);
- tasklet_enable(&hdev->tx_task);
-
- return conn;
-}
-
-struct hci_conn *hci_le_conn_add(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 addr_type)
-{
- struct hci_conn *conn = hci_conn_add(hdev, LE_LINK, 0, dst);
- if (!conn)
- return NULL;
-
- conn->dst_type = addr_type;
-
return conn;
}
@@ -614,19 +431,13 @@
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
+ BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
- spin_lock_bh(&conn->lock);
- conn->conn_valid = false; /* conn data is being released */
- spin_unlock_bh(&conn->lock);
-
- /* Make sure no timers are running */
del_timer(&conn->idle_timer);
- wake_lock_destroy(&conn->idle_lock);
- del_timer(&conn->disc_timer);
- del_timer(&conn->smp_timer);
- __cancel_delayed_work(&conn->rssi_update_work);
- del_timer(&conn->encrypt_pause_timer);
+
+ cancel_delayed_work_sync(&conn->disc_work);
+
+ del_timer(&conn->auto_accept_timer);
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
@@ -644,110 +455,43 @@
struct hci_conn *acl = conn->link;
if (acl) {
acl->link = NULL;
- hci_conn_put(acl);
+ hci_conn_drop(acl);
}
}
- tasklet_disable(&hdev->tx_task);
+ hci_chan_list_flush(conn);
+
+ if (conn->amp_mgr)
+ amp_mgr_put(conn->amp_mgr);
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
- tasklet_schedule(&hdev->tx_task);
-
- tasklet_enable(&hdev->tx_task);
-
skb_queue_purge(&conn->data_q);
- hci_conn_put_device(conn);
-
- if (conn->hidp_session_valid)
- hci_conn_put_device(conn);
+ hci_conn_del_sysfs(conn);
hci_dev_put(hdev);
- return 0;
-}
-
-struct hci_chan *hci_chan_add(struct hci_dev *hdev)
-{
- struct hci_chan *chan;
-
- BT_DBG("%s", hdev->name);
-
- chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
- if (!chan)
- return NULL;
-
- atomic_set(&chan->refcnt, 0);
-
- hci_dev_hold(hdev);
-
- chan->hdev = hdev;
-
- list_add(&chan->list, &hdev->chan_list.list);
-
- return chan;
-}
-EXPORT_SYMBOL(hci_chan_add);
-
-int hci_chan_del(struct hci_chan *chan)
-{
- BT_DBG("%s chan %p", chan->hdev->name, chan);
-
- list_del(&chan->list);
-
- hci_conn_put(chan->conn);
- hci_dev_put(chan->hdev);
-
- kfree(chan);
+ hci_conn_put(conn);
return 0;
}
-int hci_chan_put(struct hci_chan *chan)
-{
- struct hci_cp_disconn_logical_link cp;
- struct hci_conn *hcon;
- u16 ll_handle;
-
- BT_DBG("chan %p refcnt %d", chan, atomic_read(&chan->refcnt));
- if (!atomic_dec_and_test(&chan->refcnt))
- return 0;
-
- hcon = chan->conn;
- ll_handle = chan->ll_handle;
-
- hci_chan_del(chan);
-
- BT_DBG("chan->conn->state %d", hcon->state);
- if (hcon->state == BT_CONNECTED) {
- cp.log_handle = cpu_to_le16(ll_handle);
- hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK,
- sizeof(cp), &cp);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(hci_chan_put);
-
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
- BT_DBG("%s -> %s", batostr(src), batostr(dst));
+ BT_DBG("%pMR -> %pMR", src, dst);
- read_lock_bh(&hci_dev_list_lock);
+ read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
- if (d->dev_type != HCI_BREDR)
- continue;
- if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (!test_bit(HCI_UP, &d->flags) ||
+ test_bit(HCI_RAW, &d->flags) ||
+ d->dev_type != HCI_BREDR)
continue;
/* Simple routing:
@@ -769,83 +513,51 @@
if (hdev)
hdev = hci_dev_hold(hdev);
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
return hdev;
}
EXPORT_SYMBOL(hci_get_route);
-struct hci_dev *hci_dev_get_type(u8 amp_type)
+static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level, u8 auth_type)
{
- struct hci_dev *hdev = NULL;
- struct hci_dev *d;
+ struct hci_conn *le;
- BT_DBG("amp_type %d", amp_type);
+ if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
+ return ERR_PTR(-ENOTSUPP);
- read_lock_bh(&hci_dev_list_lock);
+ le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (!le) {
+ le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (le)
+ return ERR_PTR(-EBUSY);
- list_for_each_entry(d, &hci_dev_list, list) {
- if ((d->amp_type == amp_type) && test_bit(HCI_UP, &d->flags)) {
- hdev = d;
- break;
- }
+ le = hci_conn_add(hdev, LE_LINK, 0, dst);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+
+ le->dst_type = bdaddr_to_le(dst_type);
+ hci_le_create_connection(le);
}
- if (hdev)
- hdev = hci_dev_hold(hdev);
+ le->pending_sec_level = sec_level;
+ le->auth_type = auth_type;
- read_unlock_bh(&hci_dev_list_lock);
- return hdev;
+ hci_conn_hold(le);
+
+ return le;
}
-EXPORT_SYMBOL(hci_dev_get_type);
-struct hci_dev *hci_dev_get_amp(bdaddr_t *dst)
-{
- struct hci_dev *d;
- struct hci_dev *hdev = NULL;
-
- BT_DBG("%s dst %s", hdev->name, batostr(dst));
-
- read_lock_bh(&hci_dev_list_lock);
-
- list_for_each_entry(d, &hci_dev_list, list) {
- struct hci_conn *conn;
- if (d->dev_type == HCI_BREDR)
- continue;
- conn = hci_conn_hash_lookup_ba(d, ACL_LINK, dst);
- if (conn) {
- hdev = d;
- break;
- }
- }
-
- if (hdev)
- hdev = hci_dev_hold(hdev);
-
- read_unlock_bh(&hci_dev_list_lock);
- return hdev;
-}
-EXPORT_SYMBOL(hci_dev_get_amp);
-
-/* Create SCO, ACL or LE connection.
- * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
- __u16 pkt_type, bdaddr_t *dst,
- __u8 sec_level, __u8 auth_type)
+static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type)
{
struct hci_conn *acl;
- struct hci_conn *sco;
-
- BT_DBG("%s dst %s", hdev->name, batostr(dst));
-
- if (type == LE_LINK)
- return hci_le_connect(hdev, pkt_type, dst, sec_level,
- auth_type, NULL);
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
if (!acl)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
hci_conn_hold(acl);
@@ -854,10 +566,21 @@
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
- hci_acl_connect(acl);
+ hci_acl_create_connection(acl);
}
- if (type == ACL_LINK)
+ return acl;
+}
+
+static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
+ __u16 pkt_type, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type)
+{
+ struct hci_conn *acl;
+ struct hci_conn *sco;
+
+ acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
+ if (IS_ERR(acl))
return acl;
/* type of connection already existing can be ESCO or SCO
@@ -875,8 +598,8 @@
if (!sco) {
sco = hci_conn_add(hdev, type, pkt_type, dst);
if (!sco) {
- hci_conn_put(acl);
- return NULL;
+ hci_conn_drop(acl);
+ return ERR_PTR(-ENOMEM);
}
}
@@ -886,13 +609,13 @@
hci_conn_hold(sco);
if (acl->state == BT_CONNECTED &&
- (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
- acl->power_save = 1;
- hci_conn_enter_active_mode(acl, 1);
+ (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+ set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
+ hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
- if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
+ if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
/* defer SCO setup until mode change completed */
- set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
+ set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
return sco;
}
@@ -901,55 +624,42 @@
return sco;
}
-EXPORT_SYMBOL(hci_connect);
-void hci_disconnect(struct hci_conn *conn, __u8 reason)
+/* Create SCO, ACL or LE connection. */
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+ __u16 pkt_type, bdaddr_t *dst,
+ __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
- hci_proto_disconn_cfm(conn, reason, 0);
-}
-EXPORT_SYMBOL(hci_disconnect);
-
-void hci_disconnect_amp(struct hci_conn *conn, __u8 reason)
-{
- struct hci_dev *hdev = NULL;
-
- BT_DBG("conn %p", conn);
-
- read_lock_bh(&hci_dev_list_lock);
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- struct hci_conn *c;
- if (hdev == conn->hdev)
- continue;
- if (hdev->amp_type == HCI_BREDR)
- continue;
- c = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &conn->dst);
- if (c)
- hci_disconnect(c, reason);
+ switch (type) {
+ case LE_LINK:
+ return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
+ case ACL_LINK:
+ return hci_connect_acl(hdev, dst, sec_level, auth_type);
+ case SCO_LINK:
+ case ESCO_LINK:
+		return hci_connect_sco(hdev, type, pkt_type, dst,
+				       sec_level, auth_type);
}
- read_unlock_bh(&hci_dev_list_lock);
+ return ERR_PTR(-EINVAL);
}
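
/* hci_connect() now returns ERR_PTR() codes rather than NULL on failure; a
 * minimal caller sketch under that assumption (constants taken from
 * bluetooth.h and hci.h):
 *
 *	hcon = hci_connect(hdev, ACL_LINK, 0, dst, 0,
 *			   BT_SECURITY_MEDIUM, HCI_AT_GENERAL_BONDING);
 *	if (IS_ERR(hcon))
 *		return PTR_ERR(hcon);
 */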
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
- if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
- !(conn->link_mode & HCI_LM_ENCRYPT))
+ if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
return 0;
return 1;
}
-EXPORT_SYMBOL(hci_conn_check_link_mode);
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (conn->pending_sec_level > sec_level)
sec_level = conn->pending_sec_level;
@@ -961,90 +671,134 @@
/* Make sure we preserve an existing MITM requirement*/
auth_type |= (conn->auth_type & 0x01);
- conn->auth_type = auth_type;
- conn->auth_initiator = 1;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ conn->auth_type = auth_type;
+
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
/* encrypt must be pending if auth is also pending */
- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
+ if (conn->key_type != 0xff)
+ set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
}
return 0;
}
+/* Encrypt the link */
+static void hci_conn_encrypt(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p", conn);
+
+ if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.encrypt = 0x01;
+ hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+ &cp);
+ }
+}
+
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p %d %d", conn, sec_level, auth_type);
+ BT_DBG("hcon %p", conn);
+ if (conn->type == LE_LINK)
+ return smp_conn_security(conn, sec_level);
+
+ /* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
return 1;
- if (sec_level == BT_SECURITY_LOW &&
- (!conn->ssp_mode || !conn->hdev->ssp_mode))
+ /* For non 2.1 devices and low security level we don't need the link
+ key. */
+ if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
return 1;
- if (conn->type == LE_LINK) {
- if (conn->pending_sec_level > sec_level)
- sec_level = conn->pending_sec_level;
+ /* For other security levels we need the link key. */
+ if (!(conn->link_mode & HCI_LM_AUTH))
+ goto auth;
- if (sec_level > conn->sec_level)
- conn->pending_sec_level = sec_level;
- hci_proto_connect_cfm(conn, 0);
+ /* An authenticated combination key has sufficient security for any
+ security level. */
+ if (conn->key_type == HCI_LK_AUTH_COMBINATION)
+ goto encrypt;
+
+ /* An unauthenticated combination key has sufficient security for
+ security level 1 and 2. */
+ if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
+ (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
+ goto encrypt;
+
+	/* A combination key always has sufficient security for security
+	   levels 1 or 2. A high security level requires that the combination
+	   key was generated using the maximum PIN code length (16).
+	   For pre 2.1 units. */
+ if (conn->key_type == HCI_LK_COMBINATION &&
+ (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
+ goto encrypt;
+
+auth:
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return 0;
- } else if (conn->link_mode & HCI_LM_ENCRYPT) {
- return hci_conn_auth(conn, sec_level, auth_type);
- } else if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+
+ if (!hci_conn_auth(conn, sec_level, auth_type))
return 0;
- }
- if (hci_conn_auth(conn, sec_level, auth_type)) {
- struct hci_cp_set_conn_encrypt cp;
- if (timer_pending(&conn->encrypt_pause_timer)) {
- BT_INFO("encrypt_pause_timer is pending");
- return 0;
- }
- cp.handle = cpu_to_le16(conn->handle);
- cp.encrypt = 1;
- hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- }
+encrypt:
+ if (conn->link_mode & HCI_LM_ENCRYPT)
+ return 1;
+ hci_conn_encrypt(conn);
return 0;
}
EXPORT_SYMBOL(hci_conn_security);
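
/* hci_conn_security() returns 1 when the link already meets the requested
 * level and 0 while authentication/encryption is still pending; a minimal
 * caller sketch (security constants from bluetooth.h are assumed):
 *
 *	if (hci_conn_security(hcon, BT_SECURITY_MEDIUM, HCI_AT_GENERAL_BONDING))
 *		proceed immediately;
 *	else
 *		wait for the auth/encrypt complete callbacks;
 */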
+/* Check secure link requirement */
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
+{
+ BT_DBG("hcon %p", conn);
+
+ if (sec_level != BT_SECURITY_HIGH)
+ return 1; /* Accept if non-secure is required */
+
+ if (conn->sec_level == BT_SECURITY_HIGH)
+ return 1;
+
+ return 0; /* Reject not secure link */
+}
+EXPORT_SYMBOL(hci_conn_check_secure);
+
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
return 0;
}
-EXPORT_SYMBOL(hci_conn_change_link_key);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!role && conn->link_mode & HCI_LM_MASTER)
return 1;
- if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
struct hci_cp_switch_role cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.role = role;
@@ -1060,7 +814,7 @@
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
if (test_bit(HCI_RAW, &hdev->flags))
return;
@@ -1071,181 +825,33 @@
if (conn->mode != HCI_CM_SNIFF)
goto timer;
- if (!conn->power_save && !force_active)
+ if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
goto timer;
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
struct hci_cp_exit_sniff_mode cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
}
timer:
- if (hdev->idle_timeout > 0) {
- spin_lock_bh(&conn->lock);
- if (conn->conn_valid) {
- mod_timer(&conn->idle_timer,
- jiffies + msecs_to_jiffies(hdev->idle_timeout));
- wake_lock(&conn->idle_lock);
- }
- spin_unlock_bh(&conn->lock);
- }
+ if (hdev->idle_timeout > 0)
+ mod_timer(&conn->idle_timer,
+ jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
-static inline void hci_conn_stop_rssi_timer(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
- cancel_delayed_work(&conn->rssi_update_work);
-}
-
-static inline void hci_conn_start_rssi_timer(struct hci_conn *conn,
- u16 interval)
-{
- struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p, pending %d", conn,
- delayed_work_pending(&conn->rssi_update_work));
- if (!delayed_work_pending(&conn->rssi_update_work)) {
- queue_delayed_work(hdev->workqueue, &conn->rssi_update_work,
- msecs_to_jiffies(interval));
- }
-}
-
-void hci_conn_set_rssi_reporter(struct hci_conn *conn,
- s8 rssi_threshold, u16 interval, u8 updateOnThreshExceed)
-{
- if (conn) {
- conn->rssi_threshold = rssi_threshold;
- conn->rssi_update_interval = interval;
- conn->rssi_update_thresh_exceed = updateOnThreshExceed;
- hci_conn_start_rssi_timer(conn, interval);
- }
-}
-
-void hci_conn_unset_rssi_reporter(struct hci_conn *conn)
-{
- if (conn) {
- BT_DBG("Deleting the rssi_update_timer");
- hci_conn_stop_rssi_timer(conn);
- }
-}
-
-/* Enter sniff mode */
-void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p mode %d", conn, conn->mode);
-
- if (test_bit(HCI_RAW, &hdev->flags))
- return;
-
- if (conn->type == LE_LINK)
- return;
-
- if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
- return;
-
- if (conn->mode != HCI_CM_ACTIVE ||
- !(conn->link_policy & HCI_LP_SNIFF) ||
- (hci_find_link_key(hdev, &conn->dst) == NULL))
- return;
-
- if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
- struct hci_cp_sniff_subrate cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
- hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
- }
-
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
- struct hci_cp_sniff_mode cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
- cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
- hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
- }
-}
-
-struct hci_chan *hci_chan_create(struct hci_chan *chan,
- struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
-{
- struct hci_cp_create_logical_link cp;
-
- chan->state = BT_CONNECT;
- chan->tx_fs = *tx_fs;
- chan->rx_fs = *rx_fs;
- cp.phy_handle = chan->conn->handle;
- cp.tx_fs.id = chan->tx_fs.id;
- cp.tx_fs.type = chan->tx_fs.type;
- cp.tx_fs.max_sdu = cpu_to_le16(chan->tx_fs.max_sdu);
- cp.tx_fs.sdu_arr_time = cpu_to_le32(chan->tx_fs.sdu_arr_time);
- cp.tx_fs.acc_latency = cpu_to_le32(chan->tx_fs.acc_latency);
- cp.tx_fs.flush_to = cpu_to_le32(chan->tx_fs.flush_to);
- cp.rx_fs.id = chan->rx_fs.id;
- cp.rx_fs.type = chan->rx_fs.type;
- cp.rx_fs.max_sdu = cpu_to_le16(chan->rx_fs.max_sdu);
- cp.rx_fs.sdu_arr_time = cpu_to_le32(chan->rx_fs.sdu_arr_time);
- cp.rx_fs.acc_latency = cpu_to_le32(chan->rx_fs.acc_latency);
- cp.rx_fs.flush_to = cpu_to_le32(chan->rx_fs.flush_to);
- hci_conn_hold(chan->conn);
- if (chan->conn->out)
- hci_send_cmd(chan->conn->hdev, HCI_OP_CREATE_LOGICAL_LINK,
- sizeof(cp), &cp);
- else
- hci_send_cmd(chan->conn->hdev, HCI_OP_ACCEPT_LOGICAL_LINK,
- sizeof(cp), &cp);
- return chan;
-}
-EXPORT_SYMBOL(hci_chan_create);
-
-void hci_chan_modify(struct hci_chan *chan,
- struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
-{
- struct hci_cp_flow_spec_modify cp;
-
- chan->tx_fs = *tx_fs;
- chan->rx_fs = *rx_fs;
- cp.log_handle = cpu_to_le16(chan->ll_handle);
- cp.tx_fs.id = tx_fs->id;
- cp.tx_fs.type = tx_fs->type;
- cp.tx_fs.max_sdu = cpu_to_le16(tx_fs->max_sdu);
- cp.tx_fs.sdu_arr_time = cpu_to_le32(tx_fs->sdu_arr_time);
- cp.tx_fs.acc_latency = cpu_to_le32(tx_fs->acc_latency);
- cp.tx_fs.flush_to = cpu_to_le32(tx_fs->flush_to);
- cp.rx_fs.id = rx_fs->id;
- cp.rx_fs.type = rx_fs->type;
- cp.rx_fs.max_sdu = cpu_to_le16(rx_fs->max_sdu);
- cp.rx_fs.sdu_arr_time = cpu_to_le32(rx_fs->sdu_arr_time);
- cp.rx_fs.acc_latency = cpu_to_le32(rx_fs->acc_latency);
- cp.rx_fs.flush_to = cpu_to_le32(rx_fs->flush_to);
- hci_conn_hold(chan->conn);
- hci_send_cmd(chan->conn->hdev, HCI_OP_FLOW_SPEC_MODIFY, sizeof(cp),
- &cp);
-}
-EXPORT_SYMBOL(hci_chan_modify);
-
/* Drop all connection on the device */
-void hci_conn_hash_flush(struct hci_dev *hdev, u8 is_process)
+void hci_conn_hash_flush(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
+ struct hci_conn *c, *n;
BT_DBG("hdev %s", hdev->name);
- p = h->list.next;
- while (p != &h->list) {
- struct hci_conn *c;
-
- c = list_entry(p, struct hci_conn, list);
- p = p->next;
-
+ list_for_each_entry_safe(c, n, &h->list, list) {
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, 0x16, is_process);
+ hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
hci_conn_del(c);
}
}
@@ -1261,32 +867,17 @@
conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
if (conn)
- hci_acl_connect(conn);
+ hci_acl_create_connection(conn);
hci_dev_unlock(hdev);
}
-void hci_conn_hold_device(struct hci_conn *conn)
-{
- atomic_inc(&conn->devref);
-}
-EXPORT_SYMBOL(hci_conn_hold_device);
-
-void hci_conn_put_device(struct hci_conn *conn)
-{
- if (atomic_dec_and_test(&conn->devref)) {
- conn->hidp_session_valid = false;
- hci_conn_del_sysfs(conn);
- }
-}
-EXPORT_SYMBOL(hci_conn_put_device);
-
int hci_get_conn_list(void __user *arg)
{
+ struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
- struct list_head *p;
int n = 0, size, err;
if (copy_from_user(&req, arg, sizeof(req)))
@@ -1309,11 +900,8 @@
ci = cl->conn_info;
- hci_dev_lock_bh(hdev);
- list_for_each(p, &hdev->conn_hash.list) {
- register struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
-
+ hci_dev_lock(hdev);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
@@ -1332,7 +920,7 @@
if (++n >= req.conn_num)
break;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
cl->dev_id = hdev->id;
cl->conn_num = n;
@@ -1356,7 +944,7 @@
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
if (conn) {
bacpy(&ci.bdaddr, &conn->dst);
@@ -1374,10 +962,8 @@
ci.cnt = hdev->acl_cnt;
ci.pkts = hdev->acl_pkts;
}
- ci.pending_sec_level = conn->pending_sec_level;
- ci.ssp_mode = conn->ssp_mode;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
@@ -1393,11 +979,11 @@
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
if (conn)
req.type = conn->auth_type;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
@@ -1405,38 +991,81 @@
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
-int hci_set_auth_info(struct hci_dev *hdev, void __user *arg)
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
- struct hci_auth_info_req req;
- struct hci_conn *conn;
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_chan *chan;
- if (copy_from_user(&req, arg, sizeof(req)))
- return -EFAULT;
+ BT_DBG("%s hcon %p", hdev->name, conn);
- hci_dev_lock_bh(hdev);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
- if (conn) {
- conn->auth_type = req.type;
- switch (conn->auth_type) {
- case HCI_AT_NO_BONDING:
- conn->pending_sec_level = BT_SECURITY_LOW;
- break;
- case HCI_AT_DEDICATED_BONDING:
- case HCI_AT_GENERAL_BONDING:
- conn->pending_sec_level = BT_SECURITY_MEDIUM;
- break;
- case HCI_AT_DEDICATED_BONDING_MITM:
- case HCI_AT_GENERAL_BONDING_MITM:
- conn->pending_sec_level = BT_SECURITY_HIGH;
- break;
- default:
- break;
- }
+ chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
+ if (!chan)
+ return NULL;
+
+ chan->conn = conn;
+ skb_queue_head_init(&chan->data_q);
+ chan->state = BT_CONNECTED;
+
+ list_add_rcu(&chan->list, &conn->chan_list);
+
+ return chan;
+}
+
+void hci_chan_del(struct hci_chan *chan)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
+
+ list_del_rcu(&chan->list);
+
+ synchronize_rcu();
+
+ hci_conn_drop(conn);
+
+ skb_queue_purge(&chan->data_q);
+ kfree(chan);
+}
+
+void hci_chan_list_flush(struct hci_conn *conn)
+{
+ struct hci_chan *chan, *n;
+
+ BT_DBG("hcon %p", conn);
+
+ list_for_each_entry_safe(chan, n, &conn->chan_list, list)
+ hci_chan_del(chan);
+}
+
+static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
+ __u16 handle)
+{
+ struct hci_chan *hchan;
+
+ list_for_each_entry(hchan, &hcon->chan_list, list) {
+ if (hchan->handle == handle)
+ return hchan;
}
- hci_dev_unlock_bh(hdev);
- if (!conn)
- return -ENOENT;
+ return NULL;
+}
- return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *hcon;
+ struct hci_chan *hchan = NULL;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(hcon, &h->list, list) {
+ hchan = __hci_chan_lookup_handle(hcon, handle);
+ if (hchan)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return hchan;
}
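
/* A minimal sketch of the hci_chan lifecycle introduced above (the L2CAP
 * core is the intended user):
 *
 *	chan = hci_chan_create(hcon);
 *	if (!chan)
 *		return -ENOMEM;
 *	...
 *	hci_chan_del(chan);	// unlinks the channel, drops one hcon reference
 */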
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d369e4c..7f13a9f 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010-2012 The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -24,43 +25,17 @@
/* Bluetooth HCI core. */
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
+#include <linux/export.h>
+#include <linux/idr.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
#include <linux/rfkill.h>
-#include <linux/timer.h>
-#include <linux/crypto.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define AUTO_OFF_TIMEOUT 2000
-
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
-
-static DEFINE_RWLOCK(hci_task_lock);
-
-static bool enable_smp = 1;
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
@@ -70,45 +45,21 @@
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
-/* AMP Manager event callbacks */
-LIST_HEAD(amp_mgr_cb_list);
-DEFINE_RWLOCK(amp_mgr_cb_list_lock);
-
-/* HCI protocols */
-#define HCI_MAX_PROTO 2
-struct hci_proto *hci_proto[HCI_MAX_PROTO];
-
-/* HCI notifiers list */
-static ATOMIC_NOTIFIER_HEAD(hci_notifier);
+/* HCI ID Numbering */
+static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */
-int hci_register_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&hci_notifier, nb);
-}
-
-int hci_unregister_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&hci_notifier, nb);
-}
-
static void hci_notify(struct hci_dev *hdev, int event)
{
- atomic_notifier_call_chain(&hci_notifier, event, hdev);
+ hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */
-void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
- BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
-
- /* If this is the init phase check if the completed command matches
- * the last init command, and if not just return.
- */
- if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
- return;
+ BT_DBG("%s result 0x%2.2x", hdev->name, result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -128,21 +79,158 @@
}
}
-/* Execute request and wait for completion. */
-static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+ u8 event)
{
+ struct hci_ev_cmd_complete *ev;
+ struct hci_event_hdr *hdr;
+ struct sk_buff *skb;
+
+ hci_dev_lock(hdev);
+
+ skb = hdev->recv_evt;
+ hdev->recv_evt = NULL;
+
+ hci_dev_unlock(hdev);
+
+ if (!skb)
+ return ERR_PTR(-ENODATA);
+
+ if (skb->len < sizeof(*hdr)) {
+ BT_ERR("Too short HCI event");
+ goto failed;
+ }
+
+ hdr = (void *) skb->data;
+ skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
+ if (event) {
+ if (hdr->evt != event)
+ goto failed;
+ return skb;
+ }
+
+ if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+ BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
+ goto failed;
+ }
+
+ if (skb->len < sizeof(*ev)) {
+ BT_ERR("Too short cmd_complete event");
+ goto failed;
+ }
+
+ ev = (void *) skb->data;
+ skb_pull(skb, sizeof(*ev));
+
+ if (opcode == __le16_to_cpu(ev->opcode))
+ return skb;
+
+ BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
+ __le16_to_cpu(ev->opcode));
+
+failed:
+ kfree_skb(skb);
+ return ERR_PTR(-ENODATA);
+}
+
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct hci_request req;
+ int err = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add_ev(&req, opcode, plen, param, event);
+
+ hdev->req_status = HCI_REQ_PEND;
+
+ err = hci_req_run(&req, hci_req_sync_complete);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ add_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ schedule_timeout(timeout);
+
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+
+ if (signal_pending(current))
+ return ERR_PTR(-EINTR);
+
+ switch (hdev->req_status) {
+ case HCI_REQ_DONE:
+ err = -bt_to_errno(hdev->req_result);
+ break;
+
+ case HCI_REQ_CANCELED:
+ err = -hdev->req_result;
+ break;
+
+ default:
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ hdev->req_status = hdev->req_result = 0;
+
+ BT_DBG("%s end: err %d", hdev->name, err);
+
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return hci_get_cmd_complete(hdev, opcode, event);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_ev);
+
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
+}
+EXPORT_SYMBOL(__hci_cmd_sync);
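+
+/* A minimal usage sketch for the new synchronous command helper (timeout is
+ * in jiffies; hdev must come from hci_dev_get() or similar):
+ *
+ *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ *			     msecs_to_jiffies(1000));
+ *	if (IS_ERR(skb))
+ *		return PTR_ERR(skb);
+ *	// skb->data holds the Command Complete return parameters
+ *	kfree_skb(skb);
+ */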
+
+/* Execute request and wait for completion. */
+static int __hci_req_sync(struct hci_dev *hdev,
+ void (*func)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout)
+{
+ struct hci_request req;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
BT_DBG("%s start", hdev->name);
+ hci_req_init(&req, hdev);
+
hdev->req_status = HCI_REQ_PEND;
+ func(&req, opt);
+
+ err = hci_req_run(&req, hci_req_sync_complete);
+ if (err < 0) {
+ hdev->req_status = 0;
+
+ /* ENODATA means the HCI request command queue is empty.
+ * This can happen when a request with conditionals doesn't
+ * trigger any commands to be sent. This is normal behavior
+ * and should not trigger an error return.
+ */
+ if (err == -ENODATA)
+ return 0;
+
+ return err;
+ }
+
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- req(hdev, opt);
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
@@ -152,7 +240,7 @@
switch (hdev->req_status) {
case HCI_REQ_DONE:
- err = -bt_err(hdev->req_result);
+ err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
@@ -171,8 +259,10 @@
return err;
}
-static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int hci_req_sync(struct hci_dev *hdev,
+ void (*req)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int ret;
@@ -181,184 +271,427 @@
/* Serialize all requests */
hci_req_lock(hdev);
- ret = __hci_request(hdev, req, opt, timeout);
+ ret = __hci_req_sync(hdev, req, opt, timeout);
hci_req_unlock(hdev);
return ret;
}
-static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
- BT_DBG("%s %ld", hdev->name, opt);
+ BT_DBG("%s %ld", req->hdev->name, opt);
/* Reset device */
- set_bit(HCI_RESET, &hdev->flags);
- memset(&hdev->features, 0, sizeof(hdev->features));
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+ set_bit(HCI_RESET, &req->hdev->flags);
+ hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+static void bredr_init(struct hci_request *req)
{
- struct hci_cp_delete_stored_link_key cp;
- struct sk_buff *skb;
+ req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
+
+ /* Read Local Supported Features */
+ hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
+ /* Read Local Version */
+ hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Read BD Address */
+ hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
+}
+
+static void amp_init(struct hci_request *req)
+{
+ req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+
+ /* Read Local Version */
+ hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Read Local AMP Info */
+ hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+
+ /* Read Data Blk size */
+ hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+}
+
+static void hci_init1_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ BT_DBG("%s %ld", hdev->name, opt);
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
+ hci_reset_req(req, 0);
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ bredr_init(req);
+ break;
+
+ case HCI_AMP:
+ amp_init(req);
+ break;
+
+ default:
+ BT_ERR("Unknown device type %d", hdev->dev_type);
+ break;
+ }
+}
+
+static void bredr_setup(struct hci_request *req)
+{
__le16 param;
__u8 flt_type;
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Driver initialization */
-
- /* Special commands */
- while ((skb = skb_dequeue(&hdev->driver_init))) {
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- skb->dev = (void *) hdev;
-
- skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
- }
- skb_queue_purge(&hdev->driver_init);
-
- /* Mandatory initialization */
-
- /* Reset */
- if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
- set_bit(HCI_RESET, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
- }
-
- /* Read Local Version */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
-
- /* Set default HCI Flow Control Mode */
- if (hdev->dev_type == HCI_BREDR)
- hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
- else
- hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
-
- /* Read HCI Flow Control Mode */
- hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
-
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
- hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
- /* Read Data Block Size (ACL mtu, max pkt, etc.) */
- hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+ /* Read Class of Device */
+ hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-#if 0
- /* Host buffer size */
- {
- struct hci_cp_host_buffer_size cp;
- cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
- cp.sco_mtu = HCI_MAX_SCO_SIZE;
- cp.acl_max_pkt = cpu_to_le16(0xffff);
- cp.sco_max_pkt = cpu_to_le16(0xffff);
- hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
+ /* Read Local Name */
+ hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+
+ /* Read Voice Setting */
+ hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+
+ /* Clear Event Filters */
+ flt_type = HCI_FLT_CLEAR_ALL;
+ hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+
+ /* Connection accept timeout ~20 secs */
+ param = __constant_cpu_to_le16(0x7d00);
+	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ /* Read page scan parameters */
+ if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
}
-#endif
+}
- if (hdev->dev_type == HCI_BREDR) {
- /* BR-EDR initialization */
+static void le_setup(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
- /* Read Local Supported Features */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+ /* Read LE Buffer Size */
+ hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
- /* Read BD Address */
- hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
+ /* Read LE Local Supported Features */
+ hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
- /* Read Class of Device */
- hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+ /* Read LE Advertising Channel TX Power */
+ hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
- /* Read Local Name */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+ /* Read LE White List Size */
+ hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
- /* Read Voice Setting */
- hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+ /* Read LE Supported States */
+ hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
- /* Optional initialization */
- /* Clear Event Filters */
- flt_type = HCI_FLT_CLEAR_ALL;
- hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+ /* LE-only controllers have LE implicitly enabled */
+ if (!lmp_bredr_capable(hdev))
+ set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+}
- /* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
-		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
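+
+/* Inquiry mode values per the HCI Write Inquiry Mode command: 0x00 is the
+ * standard result format, 0x01 adds RSSI and 0x02 selects extended inquiry
+ * results.
+ */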
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+ if (lmp_ext_inq_capable(hdev))
+ return 0x02;
+
+ if (lmp_inq_rssi_capable(hdev))
+ return 0x01;
+
+ if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+ hdev->lmp_subver == 0x0757)
+ return 0x01;
+
+ if (hdev->manufacturer == 15) {
+ if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+ return 0x01;
+ if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+ return 0x01;
+ if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+ return 0x01;
+ }
+
+ if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+ hdev->lmp_subver == 0x1805)
+ return 0x01;
+
+ return 0x00;
+}
+
+static void hci_setup_inquiry_mode(struct hci_request *req)
+{
+ u8 mode;
+
+ mode = hci_get_inquiry_mode(req->hdev);
+
+ hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* The second byte is 0xff instead of 0x9f (two reserved bits
+ * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+ * command otherwise.
+ */
+ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
+ * any event mask for pre 1.2 devices.
+ */
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return;
+
+ if (lmp_bredr_capable(hdev)) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
+
+ if (lmp_inq_rssi_capable(hdev))
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+
+ if (lmp_sniffsubr_capable(hdev))
+ events[5] |= 0x20; /* Sniff Subrating */
+
+ if (lmp_pause_enc_capable(hdev))
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+ if (lmp_ext_inq_capable(hdev))
+ events[5] |= 0x40; /* Extended Inquiry Result */
+
+ if (lmp_no_flush_capable(hdev))
+ events[7] |= 0x01; /* Enhanced Flush Complete */
+
+ if (lmp_lsto_capable(hdev))
+ events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+ if (lmp_ssp_capable(hdev)) {
+ events[6] |= 0x01; /* IO Capability Request */
+ events[6] |= 0x02; /* IO Capability Response */
+ events[6] |= 0x04; /* User Confirmation Request */
+ events[6] |= 0x08; /* User Passkey Request */
+ events[6] |= 0x10; /* Remote OOB Data Request */
+ events[6] |= 0x20; /* Simple Pairing Complete */
+ events[7] |= 0x04; /* User Passkey Notification */
+ events[7] |= 0x08; /* Keypress Notification */
+ events[7] |= 0x10; /* Remote Host Supported
+ * Features Notification
+ */
+ }
+
+ if (lmp_le_capable(hdev))
+ events[7] |= 0x20; /* LE Meta-Event */
+
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+
+ if (lmp_le_capable(hdev)) {
+ memset(events, 0, sizeof(events));
+ events[0] = 0x1f;
+ hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
+ sizeof(events), events);
+ }
+}
+
+static void hci_init2_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ if (lmp_bredr_capable(hdev))
+ bredr_setup(req);
+
+ if (lmp_le_capable(hdev))
+ le_setup(req);
+
+ hci_setup_event_mask(req);
+
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1)
+ hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+ if (lmp_ssp_capable(hdev)) {
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ u8 mode = 0x01;
+ hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode);
+ } else {
+ struct hci_cp_write_eir cp;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(&cp, 0, sizeof(cp));
+
+ hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+ }
+ }
+
+ if (lmp_inq_rssi_capable(hdev))
+ hci_setup_inquiry_mode(req);
+
+ if (lmp_inq_tx_pwr_capable(hdev))
+ hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+
+ if (lmp_ext_feat_capable(hdev)) {
+ struct hci_cp_read_local_ext_features cp;
+
+ cp.page = 0x01;
+ hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
+ sizeof(cp), &cp);
+ }
+
+ if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+ u8 enable = 1;
+ hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
+ &enable);
+ }
+}
+
+static void hci_setup_link_policy(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_def_link_policy cp;
+ u16 link_policy = 0;
+
+ if (lmp_rswitch_capable(hdev))
+ link_policy |= HCI_LP_RSWITCH;
+ if (lmp_hold_capable(hdev))
+ link_policy |= HCI_LP_HOLD;
+ if (lmp_sniff_capable(hdev))
+ link_policy |= HCI_LP_SNIFF;
+ if (lmp_park_capable(hdev))
+ link_policy |= HCI_LP_PARK;
+
+ cp.policy = cpu_to_le16(link_policy);
+ hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
+}
+
+static void hci_set_le_support(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_le_host_supported cp;
+
+ /* LE-only devices do not support explicit enablement */
+ if (!lmp_bredr_capable(hdev))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ cp.le = 0x01;
+ cp.simul = lmp_le_br_capable(hdev);
+ }
+
+ if (cp.le != lmp_host_le_capable(hdev))
+ hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
+ &cp);
+}
+
+static void hci_init3_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 p;
+
+ /* Only send HCI_Delete_Stored_Link_Key if it is supported */
+ if (hdev->commands[6] & 0x80) {
+ struct hci_cp_delete_stored_link_key cp;
bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 1;
- hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
- sizeof(cp), &cp);
- } else {
- /* AMP initialization */
- /* Connection accept timeout ~5 secs */
- param = cpu_to_le16(0x1f40);
-		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+ cp.delete_all = 0x01;
+ hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp);
+ }
- /* Read AMP Info */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ if (hdev->commands[5] & 0x10)
+ hci_setup_link_policy(req);
+
+ if (lmp_le_capable(hdev)) {
+ hci_set_le_support(req);
+ hci_update_ad(req);
+ }
+
+ /* Read features beyond page 1 if available */
+ for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+ struct hci_cp_read_local_ext_features cp;
+
+ cp.page = p;
+ hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
+ sizeof(cp), &cp);
}
}
-static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
+static int __hci_init(struct hci_dev *hdev)
{
- BT_DBG("%s", hdev->name);
+ int err;
- /* Read LE buffer size */
- hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
+ err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
- /* Read LE clear white list */
- hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
+ /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
+ * BR/EDR/LE type controllers. AMP controllers only need the
+ * first stage init.
+ */
+ if (hdev->dev_type != HCI_BREDR)
+ return 0;
- /* Read LE white list size */
- hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
+ err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
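
/* The init stages above use the hci_request batching API; a minimal sketch
 * of that pattern (the request callback and opcode here are only examples):
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	}
 *
 *	err = __hci_req_sync(hdev, example_req, 0, HCI_INIT_TIMEOUT);
 */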
-static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
__u8 scan = opt;
- BT_DBG("%s %x", hdev->name, scan);
+ BT_DBG("%s %x", req->hdev->name, scan);
/* Inquiry and Page scans */
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
-static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
__u8 auth = opt;
- BT_DBG("%s %x", hdev->name, auth);
+ BT_DBG("%s %x", req->hdev->name, auth);
/* Authentication */
- hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+ hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
-static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
__u8 encrypt = opt;
- BT_DBG("%s %x", hdev->name, encrypt);
+ BT_DBG("%s %x", req->hdev->name, encrypt);
/* Encryption */
- hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+ hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
-static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
__le16 policy = cpu_to_le16(opt);
- BT_DBG("%s %x", hdev->name, policy);
+ BT_DBG("%s %x", req->hdev->name, policy);
/* Default link policy */
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+ hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%d", index);
@@ -366,8 +699,7 @@
return NULL;
read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
@@ -376,84 +708,223 @@
read_unlock(&hci_dev_list_lock);
return hdev;
}
-EXPORT_SYMBOL(hci_dev_get);
/* ---- Inquiry support ---- */
-static void inquiry_cache_flush(struct hci_dev *hdev)
+
+bool hci_discovery_active(struct hci_dev *hdev)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *next = cache->list, *e;
+ struct discovery_state *discov = &hdev->discovery;
- BT_DBG("cache %p", cache);
+ switch (discov->state) {
+ case DISCOVERY_FINDING:
+ case DISCOVERY_RESOLVING:
+ return true;
- cache->list = NULL;
- while ((e = next)) {
- next = e->next;
- kfree(e);
+ default:
+ return false;
}
}
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *e;
+ BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
- BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+ if (hdev->discovery.state == state)
+ return;
- for (e = cache->list; e; e = e->next)
- if (!bacmp(&e->data.bdaddr, bdaddr))
- break;
- return e;
+ switch (state) {
+ case DISCOVERY_STOPPED:
+ if (hdev->discovery.state != DISCOVERY_STARTING)
+ mgmt_discovering(hdev, 0);
+ break;
+ case DISCOVERY_STARTING:
+ break;
+ case DISCOVERY_FINDING:
+ mgmt_discovering(hdev, 1);
+ break;
+ case DISCOVERY_RESOLVING:
+ break;
+ case DISCOVERY_STOPPING:
+ break;
+ }
+
+ hdev->discovery.state = state;
}
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
+static void inquiry_cache_flush(struct hci_dev *hdev)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *p, *n;
+
+ list_for_each_entry_safe(p, n, &cache->all, all) {
+ list_del(&p->all);
+ kfree(p);
+ }
+
+ INIT_LIST_HEAD(&cache->unknown);
+ INIT_LIST_HEAD(&cache->resolve);
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p, %pMR", cache, bdaddr);
+
+ list_for_each_entry(e, &cache->all, all) {
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p, %pMR", cache, bdaddr);
+
+ list_for_each_entry(e, &cache->unknown, list) {
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
+
+ list_for_each_entry(e, &cache->resolve, list) {
+ if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
+ return e;
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct list_head *pos = &cache->resolve;
+ struct inquiry_entry *p;
+
+ list_del(&ie->list);
+
+ list_for_each_entry(p, &cache->resolve, list) {
+ if (p->name_state != NAME_PENDING &&
+ abs(p->data.rssi) >= abs(ie->data.rssi))
+ break;
+ pos = &p->list;
+ }
+
+ list_add(&ie->list, pos);
+}
+
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known, bool *ssp)
+{
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *ie;
- BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
+ BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
+
+ hci_remove_remote_oob_data(hdev, &data->bdaddr);
+
+ if (ssp)
+ *ssp = data->ssp_mode;
ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
- if (!ie) {
- /* Entry not in the cache. Add new one. */
- ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
- if (!ie)
- return;
+ if (ie) {
+ if (ie->data.ssp_mode && ssp)
+ *ssp = true;
- ie->next = cache->list;
- cache->list = ie;
+ if (ie->name_state == NAME_NEEDED &&
+ data->rssi != ie->data.rssi) {
+ ie->data.rssi = data->rssi;
+ hci_inquiry_cache_update_resolve(hdev, ie);
+ }
+
+ goto update;
+ }
+
+ /* Entry not in the cache. Add new one. */
+ ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+ if (!ie)
+ return false;
+
+ list_add(&ie->all, &cache->all);
+
+ if (name_known) {
+ ie->name_state = NAME_KNOWN;
+ } else {
+ ie->name_state = NAME_NOT_KNOWN;
+ list_add(&ie->list, &cache->unknown);
+ }
+
+update:
+ if (name_known && ie->name_state != NAME_KNOWN &&
+ ie->name_state != NAME_PENDING) {
+ ie->name_state = NAME_KNOWN;
+ list_del(&ie->list);
}
memcpy(&ie->data, data, sizeof(*data));
ie->timestamp = jiffies;
cache->timestamp = jiffies;
+
+ if (ie->name_state == NAME_NOT_KNOWN)
+ return false;
+
+ return true;
}
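
For context, a hedged sketch of how an inquiry-result handler is expected to feed this cache (the real caller is the inquiry result handling in hci_event.c, outside this hunk); the return value tells the caller whether a remote name request is still needed:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void example_inquiry_result(struct hci_dev *hdev,
				   struct inquiry_info *info, s8 rssi)
{
	struct inquiry_data data;
	bool name_known, ssp;

	bacpy(&data.bdaddr, &info->bdaddr);
	data.pscan_rep_mode    = info->pscan_rep_mode;
	data.pscan_period_mode = info->pscan_period_mode;
	data.pscan_mode        = info->pscan_mode;
	memcpy(data.dev_class, info->dev_class, 3);
	data.clock_offset      = info->clock_offset;
	data.rssi              = rssi;
	data.ssp_mode          = 0x00;

	hci_dev_lock(hdev);
	name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
	hci_dev_unlock(hdev);

	/* name_known == false: the entry sits on cache->unknown and still
	 * needs a remote name request before it is reported to mgmt. */
}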
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_info *info = (struct inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
- for (e = cache->list; e && copied < num; e = e->next, copied++) {
+ list_for_each_entry(e, &cache->all, all) {
struct inquiry_data *data = &e->data;
+
+ if (copied >= num)
+ break;
+
bacpy(&info->bdaddr, &data->bdaddr);
info->pscan_rep_mode = data->pscan_rep_mode;
info->pscan_period_mode = data->pscan_period_mode;
info->pscan_mode = data->pscan_mode;
memcpy(info->dev_class, data->dev_class, 3);
info->clock_offset = data->clock_offset;
+
info++;
+ copied++;
}
BT_DBG("cache %p, copied %d", cache, copied);
return copied;
}
-static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
+ struct hci_dev *hdev = req->hdev;
struct hci_cp_inquiry cp;
BT_DBG("%s", hdev->name);
@@ -465,7 +936,13 @@
memcpy(&cp.lap, &ir->lap, 3);
cp.length = ir->length;
cp.num_rsp = ir->num_rsp;
- hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+static int wait_inquiry(void *word)
+{
+ schedule();
+ return signal_pending(current);
}
int hci_inquiry(void __user *arg)
@@ -484,24 +961,33 @@
if (!hdev)
return -ENODEV;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
- inquiry_cache_empty(hdev) ||
- ir.flags & IREQ_CACHE_FLUSH) {
+ inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
if (do_inquiry) {
- err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
+ err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
+ timeo);
if (err < 0)
goto done;
+
+ /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
+ * cleared). If it is interrupted by a signal, return -EINTR.
+ */
+ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
+ TASK_INTERRUPTIBLE))
+ return -EINTR;
}
- /* for unlimited number of responses we will use buffer with 255 entries */
+ /* for unlimited number of responses we will use buffer with
+ * 255 entries
+ */
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -513,16 +999,16 @@
goto done;
}
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
if (!copy_to_user(ptr, &ir, sizeof(ir))) {
ptr += sizeof(ir);
if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
- ir.num_rsp))
+ ir.num_rsp))
err = -EFAULT;
} else
err = -EFAULT;
@@ -534,6 +1020,89 @@
return err;
}
+static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
+{
+ u8 ad_len = 0, flags = 0;
+ size_t name_len;
+
+ if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
+ flags |= LE_AD_GENERAL;
+
+ if (!lmp_bredr_capable(hdev))
+ flags |= LE_AD_NO_BREDR;
+
+ if (lmp_le_br_capable(hdev))
+ flags |= LE_AD_SIM_LE_BREDR_CTRL;
+
+ if (lmp_host_le_br_capable(hdev))
+ flags |= LE_AD_SIM_LE_BREDR_HOST;
+
+ if (flags) {
+ BT_DBG("adv flags 0x%02x", flags);
+
+ ptr[0] = 2;
+ ptr[1] = EIR_FLAGS;
+ ptr[2] = flags;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->adv_tx_power;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+
+ name_len = strlen(hdev->dev_name);
+ if (name_len > 0) {
+ size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+ if (name_len > max_len) {
+ name_len = max_len;
+ ptr[1] = EIR_NAME_SHORT;
+ } else
+ ptr[1] = EIR_NAME_COMPLETE;
+
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ ad_len += (name_len + 2);
+ ptr += (name_len + 2);
+ }
+
+ return ad_len;
+}
+
+void hci_update_ad(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_adv_data cp;
+ u8 len;
+
+ if (!lmp_le_capable(hdev))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = create_ad(hdev, cp.data);
+
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
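
As an illustration of the data assembled by create_ad() and pushed by hci_update_ad() above: a peripheral-mode, LE-only controller named "kbd" with a +4 dBm advertising TX power would produce the following length/type/value triplets (example bytes only, derived from the EIR_* definitions used above):

#include <linux/types.h>

/*	02 01 06	 EIR_FLAGS: LE General Discoverable | BR/EDR Not Supported
 *	02 0a 04	 EIR_TX_POWER: +4 dBm
 *	04 09 6b 62 64	 EIR_NAME_COMPLETE: "kbd"
 */
static const u8 example_adv_data[] = {
	0x02, 0x01, 0x06,
	0x02, 0x0a, 0x04,
	0x04, 0x09, 'k', 'b', 'd',
};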
+
/* ---- HCI ioctl helpers ---- */
int hci_dev_open(__u16 dev)
@@ -549,7 +1118,16 @@
hci_req_lock(hdev);
- if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ /* Check for rfkill but allow the HCI setup stage to proceed
+ * (which in itself doesn't cause any RF activity).
+ */
+ if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
ret = -ERFKILL;
goto done;
}
@@ -559,57 +1137,48 @@
goto done;
}
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
- set_bit(HCI_RAW, &hdev->flags);
-
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
- if (!skb_queue_empty(&hdev->cmd_q)) {
- BT_ERR("command queue is not empty, purging");
- skb_queue_purge(&hdev->cmd_q);
- }
- if (!skb_queue_empty(&hdev->rx_q)) {
- BT_ERR("rx queue is not empty, purging");
- skb_queue_purge(&hdev->rx_q);
- }
- if (!skb_queue_empty(&hdev->raw_q)) {
- BT_ERR("raw queue is not empty, purging");
- skb_queue_purge(&hdev->raw_q);
+ atomic_set(&hdev->cmd_cnt, 1);
+ set_bit(HCI_INIT, &hdev->flags);
+
+ if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
+ ret = hdev->setup(hdev);
+
+ if (!ret) {
+ /* Treat all non BR/EDR controllers as raw devices if
+ * enable_hs is not set.
+ */
+ if (hdev->dev_type != HCI_BREDR && !enable_hs)
+ set_bit(HCI_RAW, &hdev->flags);
+
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ set_bit(HCI_RAW, &hdev->flags);
+
+ if (!test_bit(HCI_RAW, &hdev->flags))
+ ret = __hci_init(hdev);
}
- if (!test_bit(HCI_RAW, &hdev->flags)) {
- atomic_set(&hdev->cmd_cnt, 1);
- set_bit(HCI_INIT, &hdev->flags);
- hdev->init_last_cmd = 0;
-
- ret = __hci_request(hdev, hci_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
-
- if (lmp_le_capable(hdev))
- ret = __hci_request(hdev, hci_le_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
-
- clear_bit(HCI_INIT, &hdev->flags);
- }
+ clear_bit(HCI_INIT, &hdev->flags);
if (!ret) {
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->flags) &&
- hdev->dev_type == HCI_BREDR) {
- hci_dev_lock_bh(hdev);
- mgmt_powered(hdev->id, 1);
- hci_dev_unlock_bh(hdev);
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+ mgmt_valid_hdev(hdev)) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 1);
+ hci_dev_unlock(hdev);
}
} else {
/* Init failed, cleanup */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
- tasklet_kill(&hdev->cmd_task);
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->cmd_work);
+ flush_work(&hdev->rx_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
@@ -632,12 +1201,14 @@
return ret;
}
-static int hci_dev_do_close(struct hci_dev *hdev, u8 is_process)
+static int hci_dev_do_close(struct hci_dev *hdev)
{
- unsigned long keepflags = 0;
-
BT_DBG("%s %p", hdev->name, hdev);
+ cancel_work_sync(&hdev->le_scan);
+
+ cancel_delayed_work(&hdev->power_off);
+
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
@@ -647,38 +1218,43 @@
return 0;
}
- /* Kill RX and TX tasks */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
+ /* Flush RX and TX works */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
- hci_dev_lock_bh(hdev);
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->service_cache);
+
+ cancel_delayed_work_sync(&hdev->le_scan_disable);
+
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
- hci_conn_hash_flush(hdev, is_process);
- hci_dev_unlock_bh(hdev);
+ hci_conn_hash_flush(hdev);
+ hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
- if (hdev->dev_type == HCI_BREDR) {
- hci_dev_lock_bh(hdev);
- mgmt_powered(hdev->id, 0);
- hci_dev_unlock_bh(hdev);
- }
-
if (hdev->flush)
hdev->flush(hdev);
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
- if (!test_bit(HCI_RAW, &hdev->flags)) {
+ if (!test_bit(HCI_RAW, &hdev->flags) &&
+ test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
- __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(250));
+ __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
- /* Kill cmd task */
- tasklet_kill(&hdev->cmd_task);
+ /* flush cmd work */
+ flush_work(&hdev->cmd_work);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -692,19 +1268,29 @@
hdev->sent_cmd = NULL;
}
+ kfree_skb(hdev->recv_evt);
+ hdev->recv_evt = NULL;
+
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
- /* Clear only non-persistent flags */
- if (test_bit(HCI_MGMT, &hdev->flags))
- set_bit(HCI_MGMT, &keepflags);
- if (test_bit(HCI_LINK_KEYS, &hdev->flags))
- set_bit(HCI_LINK_KEYS, &keepflags);
- if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
- set_bit(HCI_DEBUG_KEYS, &keepflags);
+ /* Clear flags */
+ hdev->flags = 0;
+ hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- hdev->flags = keepflags;
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+ mgmt_valid_hdev(hdev)) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
+ }
+
+ /* Controller radio is available but is currently powered down */
+ hdev->amp_status = 0;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
hci_req_unlock(hdev);
@@ -720,7 +1306,12 @@
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
- err = hci_dev_do_close(hdev, 1);
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
+
+ err = hci_dev_do_close(hdev);
+
hci_dev_put(hdev);
return err;
}
@@ -735,7 +1326,6 @@
return -ENODEV;
hci_req_lock(hdev);
- tasklet_disable(&hdev->tx_task);
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
@@ -744,10 +1334,10 @@
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
- hci_conn_hash_flush(hdev, 0);
- hci_dev_unlock_bh(hdev);
+ hci_conn_hash_flush(hdev);
+ hci_dev_unlock(hdev);
if (hdev->flush)
hdev->flush(hdev);
@@ -756,11 +1346,9 @@
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
- ret = __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
done:
- tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
@@ -797,8 +1385,8 @@
switch (cmd) {
case HCISETAUTH:
- err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETENCRYPT:
@@ -809,24 +1397,24 @@
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
- err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
if (err)
break;
}
- err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETSCAN:
- err = hci_request(hdev, hci_scan_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKPOL:
- err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKMODE:
@@ -859,9 +1447,9 @@
int hci_get_dev_list(void __user *arg)
{
+ struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
@@ -879,16 +1467,13 @@
dr = dl->dev_req;
- read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *hdev;
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
- hdev = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(hdev);
-
- if (!test_bit(HCI_MGMT, &hdev->flags))
- set_bit(HCI_PAIRABLE, &hdev->flags);
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
@@ -896,7 +1481,7 @@
if (++n >= dev_num)
break;
}
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
@@ -920,20 +1505,28 @@
if (!hdev)
return -ENODEV;
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work_sync(&hdev->power_off);
- if (!test_bit(HCI_MGMT, &hdev->flags))
- set_bit(HCI_PAIRABLE, &hdev->flags);
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
di.flags = hdev->flags;
di.pkt_type = hdev->pkt_type;
- di.acl_mtu = hdev->acl_mtu;
- di.acl_pkts = hdev->acl_pkts;
- di.sco_mtu = hdev->sco_mtu;
- di.sco_pkts = hdev->sco_pkts;
+ if (lmp_bredr_capable(hdev)) {
+ di.acl_mtu = hdev->acl_mtu;
+ di.acl_pkts = hdev->acl_pkts;
+ di.sco_mtu = hdev->sco_mtu;
+ di.sco_pkts = hdev->sco_pkts;
+ } else {
+ di.acl_mtu = hdev->le_mtu;
+ di.acl_pkts = hdev->le_pkts;
+ di.sco_mtu = 0;
+ di.sco_pkts = 0;
+ }
di.link_policy = hdev->link_policy;
di.link_mode = hdev->link_mode;
@@ -956,10 +1549,13 @@
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
- if (!blocked)
- return 0;
-
- hci_dev_do_close(hdev, 0);
+ if (blocked) {
+ set_bit(HCI_RFKILLED, &hdev->dev_flags);
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+ hci_dev_do_close(hdev);
+ } else {
+ clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+}
return 0;
}
@@ -968,31 +1564,6 @@
.set_block = hci_rfkill_set_block,
};
-/* Alloc HCI device */
-struct hci_dev *hci_alloc_dev(void)
-{
- struct hci_dev *hdev;
-
- hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
- if (!hdev)
- return NULL;
-
- skb_queue_head_init(&hdev->driver_init);
-
- return hdev;
-}
-EXPORT_SYMBOL(hci_alloc_dev);
-
-/* Free HCI device */
-void hci_free_dev(struct hci_dev *hdev)
-{
- skb_queue_purge(&hdev->driver_init);
-
- /* will free via device release */
- put_device(&hdev->dev);
-}
-EXPORT_SYMBOL(hci_free_dev);
-
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1001,57 +1572,57 @@
BT_DBG("%s", hdev->name);
err = hci_dev_open(hdev->id);
- if (err && err != -EALREADY)
+ if (err < 0) {
+ mgmt_set_powered_failed(hdev, err);
return;
+ }
- if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
- hdev->dev_type == HCI_BREDR)
- mod_timer(&hdev->off_timer,
- jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+ clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ hci_dev_do_close(hdev);
+ } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+ HCI_AUTO_OFF_TIMEOUT);
+ }
- if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
- hdev->dev_type == HCI_BREDR)
- mgmt_index_added(hdev->id);
+ if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
+ mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ power_off.work);
BT_DBG("%s", hdev->name);
- hci_dev_close(hdev->id);
+ hci_dev_do_close(hdev);
}
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) data;
+ struct hci_dev *hdev;
+ u8 scan = SCAN_PAGE;
+
+ hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ hci_dev_lock(hdev);
- queue_work(hdev->workqueue, &hdev->power_off);
-}
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-void hci_del_off_timer(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
+ hdev->discov_timeout = 0;
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
- del_timer(&hdev->off_timer);
+ hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
- struct list_head *p, *n;
+ struct bt_uuid *uuid, *tmp;
- list_for_each_safe(p, n, &hdev->uuids) {
- struct bt_uuid *uuid;
-
- uuid = list_entry(p, struct bt_uuid, list);
-
- list_del(p);
+ list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
+ list_del(&uuid->list);
kfree(uuid);
}
@@ -1074,158 +1645,179 @@
return 0;
}
+int hci_smp_ltks_clear(struct hci_dev *hdev)
+{
+ struct smp_ltk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ list_del(&k->list);
+ kfree(k);
+ }
+
+ return 0;
+}
+
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct list_head *p;
+ struct link_key *k;
- list_for_each(p, &hdev->link_keys) {
- struct link_key *k;
-
- k = list_entry(p, struct link_key, list);
-
+ list_for_each_entry(k, &hdev->link_keys, list)
if (bacmp(bdaddr, &k->bdaddr) == 0)
return k;
- }
return NULL;
}
-struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 key_type, u8 old_key_type)
{
- struct list_head *p;
+ /* Legacy key */
+ if (key_type < 0x03)
+ return true;
- list_for_each(p, &hdev->link_keys) {
- struct link_key *k;
- struct key_master_id *id;
+ /* Debug keys are insecure so don't store them persistently */
+ if (key_type == HCI_LK_DEBUG_COMBINATION)
+ return false;
- k = list_entry(p, struct link_key, list);
+ /* Changed combination key and there's no previous one */
+ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
+ return false;
- if (k->key_type != KEY_TYPE_LTK)
+ /* Security mode 3 case */
+ if (!conn)
+ return true;
+
+ /* Neither local nor remote side had no-bonding as requirement */
+ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
+ return true;
+
+ /* Local side had dedicated bonding as requirement */
+ if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
+ return true;
+
+ /* Remote side had dedicated bonding as requirement */
+ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
+ return true;
+
+ /* If none of the above criteria match, then don't store the key
+ * persistently */
+ return false;
+}
+
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+{
+ struct smp_ltk *k;
+
+ list_for_each_entry(k, &hdev->long_term_keys, list) {
+ if (k->ediv != ediv ||
+ memcmp(rand, k->rand, sizeof(k->rand)))
continue;
- if (k->dlen != sizeof(*id))
- continue;
-
- id = (void *) &k->data;
- if (id->ediv == ediv &&
- (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
- return k;
+ return k;
}
return NULL;
}
-EXPORT_SYMBOL(hci_find_ltk);
-struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
- bdaddr_t *bdaddr, u8 type)
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type)
{
- struct list_head *p;
+ struct smp_ltk *k;
- list_for_each(p, &hdev->link_keys) {
- struct link_key *k;
-
- k = list_entry(p, struct link_key, list);
-
- if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
+ list_for_each_entry(k, &hdev->long_term_keys, list)
+ if (addr_type == k->bdaddr_type &&
+ bacmp(bdaddr, &k->bdaddr) == 0)
return k;
- }
return NULL;
}
-EXPORT_SYMBOL(hci_find_link_key_type);
-int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
- u8 *val, u8 type, u8 pin_len)
+int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
+ bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
- struct hci_conn *conn;
u8 old_key_type;
- u8 bonded = 0;
+ bool persistent;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
- old_key_type = old_key->key_type;
+ old_key_type = old_key->type;
key = old_key;
} else {
- old_key_type = 0xff;
+ old_key_type = conn ? conn->key_type : 0xff;
key = kzalloc(sizeof(*key), GFP_ATOMIC);
if (!key)
return -ENOMEM;
list_add(&key->list, &hdev->link_keys);
}
- BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+ BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
- bacpy(&key->bdaddr, bdaddr);
- memcpy(key->val, val, 16);
- key->auth = 0x01;
- key->key_type = type;
- key->pin_len = pin_len;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
- /* Store the link key persistently if one of the following is true:
- * 1. the remote side is using dedicated bonding since in that case
- * also the local requirements are set to dedicated bonding
- * 2. the local side had dedicated bonding as a requirement
- * 3. this is a legacy link key
- * 4. this is a changed combination key and there was a previously
- * stored one
- * If none of the above match only keep the link key around for
- * this connection and set the temporary flag for the device.
- */
-
- if (conn) {
- if ((conn->remote_auth > 0x01) ||
- (conn->auth_initiator && conn->auth_type > 0x01) ||
- (key->key_type < 0x03) ||
- (key->key_type == 0x06 && old_key_type != 0xff))
- bonded = 1;
+ /* Some buggy controller combinations generate a changed
+ * combination key for legacy pairing even when there's no
+ * previous key */
+ if (type == HCI_LK_CHANGED_COMBINATION &&
+ (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
+ type = HCI_LK_COMBINATION;
+ if (conn)
+ conn->key_type = type;
}
- if (new_key)
- mgmt_new_key(hdev->id, key, bonded);
+ bacpy(&key->bdaddr, bdaddr);
+ memcpy(key->val, val, HCI_LINK_KEY_SIZE);
+ key->pin_len = pin_len;
- if (type == 0x06)
- key->key_type = old_key_type;
+ if (type == HCI_LK_CHANGED_COMBINATION)
+ key->type = old_key_type;
+ else
+ key->type = type;
+
+ if (!new_key)
+ return 0;
+
+ persistent = hci_persistent_key(hdev, conn, type, old_key_type);
+
+ mgmt_new_link_key(hdev, key, persistent);
+
+ if (conn)
+ conn->flush_key = !persistent;
return 0;
}
-int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
- u8 addr_type, u8 key_size, u8 auth,
- __le16 ediv, u8 rand[8], u8 ltk[16])
+int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
+ ediv, u8 rand[8])
{
- struct link_key *key, *old_key;
- struct key_master_id *id;
+ struct smp_ltk *key, *old_key;
- BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
- batostr(bdaddr), addr_type);
+ if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
+ return 0;
- old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
- if (old_key) {
+ old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
+ if (old_key)
key = old_key;
- } else {
- key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
+ else {
+ key = kzalloc(sizeof(*key), GFP_ATOMIC);
if (!key)
return -ENOMEM;
- list_add(&key->list, &hdev->link_keys);
+ list_add(&key->list, &hdev->long_term_keys);
}
- key->dlen = sizeof(*id);
-
bacpy(&key->bdaddr, bdaddr);
- key->addr_type = addr_type;
- memcpy(key->val, ltk, sizeof(key->val));
- key->key_type = KEY_TYPE_LTK;
- key->pin_len = key_size;
- key->auth = auth;
+ key->bdaddr_type = addr_type;
+ memcpy(key->val, tk, sizeof(key->val));
+ key->authenticated = authenticated;
+ key->ediv = ediv;
+ key->enc_size = enc_size;
+ key->type = type;
+ memcpy(key->rand, rand, sizeof(key->rand));
- id = (void *) &key->data;
- id->ediv = ediv;
- memcpy(id->rand, rand, sizeof(id->rand));
+ if (!new_key)
+ return 0;
- if (new_key)
- mgmt_new_key(hdev->id, key, auth & 0x01);
+ if (type & HCI_SMP_LTK)
+ mgmt_new_ltk(hdev, key, 1);
return 0;
}
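
A hedged sketch of a caller storing a freshly distributed LTK through the new interface (the real call sites are in smp.c, outside this hunk); the key material below is placeholder data:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void example_store_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	u8 tk[16] = { 0 };		/* placeholder long term key */
	u8 rand[8] = { 0 };		/* placeholder Rand from the peer */
	__le16 ediv = cpu_to_le16(0);	/* placeholder EDIV from the peer */

	hci_dev_lock(hdev);
	hci_add_ltk(hdev, bdaddr, ADDR_LE_DEV_PUBLIC, HCI_SMP_LTK,
		    1 /* new_key */, 0x00 /* unauthenticated */, tk,
		    sizeof(tk), ediv, rand);
	hci_dev_unlock(hdev);
}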
@@ -1238,7 +1830,7 @@
if (!key)
return -ENOENT;
- BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
list_del(&key->list);
kfree(key);
@@ -1246,19 +1838,43 @@
return 0;
}
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct smp_ltk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr))
+ continue;
+
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+ list_del(&k->list);
+ kfree(k);
+ }
+
+ return 0;
+}
+
/* HCI command timer function */
-static void hci_cmd_timer(unsigned long arg)
+static void hci_cmd_timeout(unsigned long arg)
{
struct hci_dev *hdev = (void *) arg;
- BT_ERR("%s command tx timeout", hdev->name);
+ if (hdev->sent_cmd) {
+ struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
+
+ BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
+ } else {
+ BT_ERR("%s command tx timeout", hdev->name);
+ }
+
atomic_set(&hdev->cmd_cnt, 1);
- clear_bit(HCI_RESET, &hdev->flags);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
+ bdaddr_t *bdaddr)
{
struct oob_data *data;
@@ -1277,7 +1893,7 @@
if (!data)
return -ENOENT;
- BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
list_del(&data->list);
kfree(data);
@@ -1297,67 +1913,8 @@
return 0;
}
-static void hci_adv_clear(unsigned long arg)
-{
- struct hci_dev *hdev = (void *) arg;
-
- hci_adv_entries_clear(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct list_head *p, *n;
-
- BT_DBG("");
- write_lock_bh(&hdev->adv_entries_lock);
-
- list_for_each_safe(p, n, &hdev->adv_entries) {
- struct adv_entry *entry;
-
- entry = list_entry(p, struct adv_entry, list);
-
- list_del(p);
- kfree(entry);
- }
-
- write_unlock_bh(&hdev->adv_entries_lock);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct list_head *p;
- struct adv_entry *res = NULL;
-
- BT_DBG("");
- read_lock_bh(&hdev->adv_entries_lock);
-
- list_for_each(p, &hdev->adv_entries) {
- struct adv_entry *entry;
-
- entry = list_entry(p, struct adv_entry, list);
-
- if (bacmp(bdaddr, &entry->bdaddr) == 0) {
- res = entry;
- goto out;
- }
- }
-out:
- read_unlock_bh(&hdev->adv_entries_lock);
- return res;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
- u8 *randomizer)
+ u8 *randomizer)
{
struct oob_data *data;
@@ -1375,165 +1932,319 @@
memcpy(data->hash, hash, sizeof(data->hash));
memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
- BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
+ BT_DBG("%s for %pMR", hdev->name, bdaddr);
return 0;
}
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct adv_entry *entry;
- u8 flags = 0;
- int i;
+ struct bdaddr_list *b;
- BT_DBG("");
+ list_for_each_entry(b, &hdev->blacklist, list)
+ if (bacmp(bdaddr, &b->bdaddr) == 0)
+ return b;
- if (!is_connectable_adv(ev->evt_type))
- return -EINVAL;
+ return NULL;
+}
- if (ev->data && ev->length) {
- for (i = 0; (i + 2) < ev->length; i++)
- if (ev->data[i+1] == 0x01) {
- flags = ev->data[i+2];
- BT_DBG("flags: %2.2x", flags);
- break;
- } else {
- i += ev->data[i];
- }
+int hci_blacklist_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->blacklist) {
+ struct bdaddr_list *b;
+
+ b = list_entry(p, struct bdaddr_list, list);
+
+ list_del(p);
+ kfree(b);
}
- entry = hci_find_adv_entry(hdev, &ev->bdaddr);
- /* Only new entries should be added to adv_entries. So, if
- * bdaddr was found, don't add it. */
- if (entry) {
- entry->flags = flags;
- return 0;
- }
+ return 0;
+}
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *entry;
+
+ if (bacmp(bdaddr, BDADDR_ANY) == 0)
+ return -EBADF;
+
+ if (hci_blacklist_lookup(hdev, bdaddr))
+ return -EEXIST;
+
+ entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- bacpy(&entry->bdaddr, &ev->bdaddr);
- entry->bdaddr_type = ev->bdaddr_type;
- entry->flags = flags;
+ bacpy(&entry->bdaddr, bdaddr);
- write_lock(&hdev->adv_entries_lock);
- list_add(&entry->list, &hdev->adv_entries);
- write_unlock(&hdev->adv_entries_lock);
+ list_add(&entry->list, &hdev->blacklist);
+
+ return mgmt_device_blocked(hdev, bdaddr, type);
+}
+
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *entry;
+
+ if (bacmp(bdaddr, BDADDR_ANY) == 0)
+ return hci_blacklist_clear(hdev);
+
+ entry = hci_blacklist_lookup(hdev, bdaddr);
+ if (!entry)
+ return -ENOENT;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return mgmt_device_unblocked(hdev, bdaddr, type);
+}
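
A short sketch of driving the block/unblock helpers above (mgmt.c is the assumed caller; BDADDR_BREDR is assumed as the address type constant passed through to the mgmt events):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int example_block_unblock(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	if (err == -EEXIST)
		BT_DBG("%pMR already blocked", bdaddr);
	else if (!err)
		err = hci_blacklist_del(hdev, bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}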
+
+static void le_scan_param_req(struct hci_request *req, unsigned long opt)
+{
+ struct le_scan_params *param = (struct le_scan_params *) opt;
+ struct hci_cp_le_set_scan_param cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = param->type;
+ cp.interval = cpu_to_le16(param->interval);
+ cp.window = cpu_to_le16(param->window);
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
+}
+
+static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_cp_le_set_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_ENABLE;
+ cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
+ u16 window, int timeout)
+{
+ long timeo = msecs_to_jiffies(3000);
+ struct le_scan_params param;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EINPROGRESS;
+
+ param.type = type;
+ param.interval = interval;
+ param.window = window;
+
+ hci_req_lock(hdev);
+
+ err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,

+ timeo);
+ if (!err)
+ err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
+
+ hci_req_unlock(hdev);
+
+ if (err < 0)
+ return err;
+
+ queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
+ timeout);
return 0;
}
-static struct crypto_blkcipher *alloc_cypher(void)
+int hci_cancel_le_scan(struct hci_dev *hdev)
{
- if (enable_smp)
- return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ BT_DBG("%s", hdev->name);
- return ERR_PTR(-ENOTSUPP);
-}
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EALREADY;
-/* Register HCI device */
-int hci_register_dev(struct hci_dev *hdev)
-{
- struct list_head *head = &hci_dev_list, *p;
- int i, id;
+ if (cancel_delayed_work(&hdev->le_scan_disable)) {
+ struct hci_cp_le_set_scan_enable cp;
- BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
- hdev->bus, hdev->owner);
-
- if (!hdev->open || !hdev->close || !hdev->destruct)
- return -EINVAL;
-
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
-
- write_lock_bh(&hci_dev_list_lock);
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- if (list_entry(p, struct hci_dev, list)->id != id)
- break;
- head = p; id++;
+ /* Send HCI command to disable LE Scan */
+ memset(&cp, 0, sizeof(cp));
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
- snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
- hdev->id = id;
- list_add(&hdev->list, head);
+ return 0;
+}
- atomic_set(&hdev->refcnt, 1);
- spin_lock_init(&hdev->lock);
+static void le_scan_disable_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_disable.work);
+ struct hci_cp_le_set_scan_enable cp;
- hdev->flags = 0;
+ BT_DBG("%s", hdev->name);
+
+ memset(&cp, 0, sizeof(cp));
+
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static void le_scan_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
+ struct le_scan_params *param = &hdev->le_scan_params;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_do_le_scan(hdev, param->type, param->interval, param->window,
+ param->timeout);
+}
+
+int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
+ int timeout)
+{
+ struct le_scan_params *param = &hdev->le_scan_params;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
+ return -ENOTSUPP;
+
+ if (work_busy(&hdev->le_scan))
+ return -EINPROGRESS;
+
+ param->type = type;
+ param->interval = interval;
+ param->window = window;
+ param->timeout = timeout;
+
+ queue_work(system_long_wq, &hdev->le_scan);
+
+ return 0;
+}
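
A minimal sketch of starting a background LE scan with the helper above, assuming the LE_SCAN_ACTIVE define from hci.h. The interval and window are in 0.625 ms units; the timeout is handed straight to queue_delayed_work(), so it is given in jiffies:

#include <linux/jiffies.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int example_start_le_scan(struct hci_dev *hdev)
{
	/* 0x12 * 0.625 ms = 11.25 ms interval and window, active scan,
	 * auto-disabled by le_scan_disable after roughly 10 seconds. */
	return hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x12, 0x12,
			   msecs_to_jiffies(10240));
}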
+
+/* Alloc HCI device */
+struct hci_dev *hci_alloc_dev(void)
+{
+ struct hci_dev *hdev;
+
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
+
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
+ hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+ hdev->adv_tx_power = HCI_TX_POWER_INVALID;
- hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
- tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
- tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+ INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
+ INIT_LIST_HEAD(&hdev->remote_oob_data);
+ INIT_LIST_HEAD(&hdev->conn_hash.list);
+
+ INIT_WORK(&hdev->rx_work, hci_rx_work);
+ INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+ INIT_WORK(&hdev->tx_work, hci_tx_work);
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->le_scan, le_scan_work);
+
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
- setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
- (unsigned long) hdev);
- setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
- (unsigned long) hdev);
-
- for (i = 0; i < NUM_REASSEMBLY; i++)
- hdev->reassembly[i] = NULL;
-
init_waitqueue_head(&hdev->req_wait_q);
- mutex_init(&hdev->req_lock);
- inquiry_cache_init(hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
- hci_conn_hash_init(hdev);
- hci_chan_list_init(hdev);
+ hci_init_sysfs(hdev);
+ discovery_init(hdev);
- INIT_LIST_HEAD(&hdev->blacklist);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_alloc_dev);
- INIT_LIST_HEAD(&hdev->uuids);
+/* Free HCI device */
+void hci_free_dev(struct hci_dev *hdev)
+{
+ /* will free via device release */
+ put_device(&hdev->dev);
+}
+EXPORT_SYMBOL(hci_free_dev);
- INIT_LIST_HEAD(&hdev->link_keys);
+/* Register HCI device */
+int hci_register_dev(struct hci_dev *hdev)
+{
+ int id, error;
- INIT_LIST_HEAD(&hdev->remote_oob_data);
+ if (!hdev->open || !hdev->close)
+ return -EINVAL;
- INIT_LIST_HEAD(&hdev->adv_entries);
- rwlock_init(&hdev->adv_entries_lock);
- setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
+ break;
+ case HCI_AMP:
+ id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
+ break;
+ default:
+ return -EINVAL;
+ }
- INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_WORK(&hdev->power_off, hci_power_off);
- setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+ if (id < 0)
+ return id;
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+ snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
+ hdev->id = id;
- atomic_set(&hdev->promisc, 0);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_unlock_bh(&hci_dev_list_lock);
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
+ write_unlock(&hci_dev_list_lock);
- hdev->workqueue = create_singlethread_workqueue(hdev->name);
- if (!hdev->workqueue)
- goto nomem;
+ hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!hdev->workqueue) {
+ error = -ENOMEM;
+ goto err;
+ }
- hdev->tfm = alloc_cypher();
- if (IS_ERR(hdev->tfm))
- BT_INFO("Failed to load transform for ecb(aes): %ld",
- PTR_ERR(hdev->tfm));
+ hdev->req_workqueue = alloc_workqueue(hdev->name,
+ WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!hdev->req_workqueue) {
+ destroy_workqueue(hdev->workqueue);
+ error = -ENOMEM;
+ goto err;
+ }
- hci_register_sysfs(hdev);
+ error = hci_add_sysfs(hdev);
+ if (error < 0)
+ goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
- RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
+ RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
+ hdev);
if (hdev->rfkill) {
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
@@ -1541,49 +2252,66 @@
}
}
- set_bit(HCI_AUTO_OFF, &hdev->flags);
- set_bit(HCI_SETUP, &hdev->flags);
- queue_work(hdev->workqueue, &hdev->power_on);
+ if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
+ set_bit(HCI_RFKILLED, &hdev->dev_flags);
+
+ set_bit(HCI_SETUP, &hdev->dev_flags);
+
+ if (hdev->dev_type != HCI_AMP)
+ set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
hci_notify(hdev, HCI_DEV_REG);
+ hci_dev_hold(hdev);
+
+ queue_work(hdev->req_workqueue, &hdev->power_on);
return id;
-nomem:
- write_lock_bh(&hci_dev_list_lock);
+err_wqueue:
+ destroy_workqueue(hdev->workqueue);
+ destroy_workqueue(hdev->req_workqueue);
+err:
+ ida_simple_remove(&hci_index_ida, hdev->id);
+ write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
- return -ENOMEM;
+ return error;
}
EXPORT_SYMBOL(hci_register_dev);
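
For reference, a hedged sketch of how a transport driver would register against the reworked allocation and registration path above; the foo_* callbacks and probe function are hypothetical placeholders, and note that a destruct callback is no longer required:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int foo_open(struct hci_dev *hdev) { return 0; }
static int foo_close(struct hci_dev *hdev) { return 0; }
static int foo_send(struct sk_buff *skb) { kfree_skb(skb); return 0; }

static int foo_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->dev_type = HCI_BREDR;	/* BR/EDR ids start at 0, AMP at 1 */
	hdev->open = foo_open;
	hdev->close = foo_close;
	hdev->send = foo_send;

	err = hci_register_dev(hdev);	/* returns the allocated id or < 0 */
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}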
/* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
{
- int i;
+ int i, id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_lock_bh(&hci_dev_list_lock);
- list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ set_bit(HCI_UNREGISTER, &hdev->dev_flags);
- hci_dev_do_close(hdev, hdev->bus == HCI_SMD);
+ id = hdev->id;
+
+ write_lock(&hci_dev_list_lock);
+ list_del(&hdev->list);
+ write_unlock(&hci_dev_list_lock);
+
+ hci_dev_do_close(hdev);
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
+ cancel_work_sync(&hdev->power_on);
+
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->flags) &&
- hdev->dev_type == HCI_BREDR) {
- hci_dev_lock_bh(hdev);
- mgmt_index_removed(hdev->id);
- hci_dev_unlock_bh(hdev);
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ hci_dev_lock(hdev);
+ mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
}
- if (!IS_ERR(hdev->tfm))
- crypto_free_blkcipher(hdev->tfm);
+ /* mgmt_index_removed should take care of emptying the
+ * pending list */
+ BUG_ON(!list_empty(&hdev->mgmt_pending));
hci_notify(hdev, HCI_DEV_UNREG);
@@ -1592,28 +2320,22 @@
rfkill_destroy(hdev->rfkill);
}
- hci_unregister_sysfs(hdev);
-
- /* Disable all timers */
- hci_del_off_timer(hdev);
- del_timer(&hdev->adv_timer);
- del_timer(&hdev->cmd_timer);
- del_timer(&hdev->disco_timer);
- del_timer(&hdev->disco_le_timer);
+ hci_del_sysfs(hdev);
destroy_workqueue(hdev->workqueue);
+ destroy_workqueue(hdev->req_workqueue);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
+ hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
- hci_adv_entries_clear(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
- __hci_dev_put(hdev);
+ hci_dev_put(hdev);
- return 0;
+ ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1638,27 +2360,26 @@
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
- && !test_bit(HCI_INIT, &hdev->flags))) {
+ && !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
- /* Incomming skb */
+ /* Incoming skb */
bt_cb(skb)->incoming = 1;
/* Time stamp */
__net_timestamp(skb);
- /* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
- tasklet_schedule(&hdev->rx_task);
+ queue_work(hdev->workqueue, &hdev->rx_work);
return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
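
A small sketch of the driver-side receive path that feeds hci_recv_frame() above; the packet type must be set in the skb control block and skb->dev must point at the hdev before queuing (example helper only):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int example_driver_rx(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);	/* queues on rx_q, schedules rx_work */
}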
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
- int count, __u8 index)
+ int count, __u8 index)
{
int len = 0;
int hlen = 0;
@@ -1667,7 +2388,7 @@
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
- index >= NUM_REASSEMBLY)
+ index >= NUM_REASSEMBLY)
return -EILSEQ;
skb = hdev->reassembly[index];
@@ -1702,7 +2423,7 @@
while (count) {
scb = (void *) skb->cb;
- len = min(scb->expect, (__u16)count);
+ len = min_t(uint, scb->expect, count);
memcpy(skb_put(skb, len), data, len);
@@ -1780,7 +2501,7 @@
data += (count - rem);
count = rem;
- };
+ }
return rem;
}
@@ -1809,13 +2530,13 @@
type = bt_cb(skb)->pkt_type;
rem = hci_reassembly(hdev, type, data, count,
- STREAM_REASSEMBLY);
+ STREAM_REASSEMBLY);
if (rem < 0)
return rem;
data += (count - rem);
count = rem;
- };
+ }
return rem;
}
@@ -1823,59 +2544,13 @@
/* ---- Interface to upper protocols ---- */
-/* Register/Unregister protocols.
- * hci_task_lock is used to ensure that no tasks are running. */
-int hci_register_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (!hci_proto[hp->id])
- hci_proto[hp->id] = hp;
- else
- err = -EEXIST;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_register_proto);
-
-int hci_unregister_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (hci_proto[hp->id])
- hci_proto[hp->id] = NULL;
- else
- err = -ENOENT;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_unregister_proto);
-
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
@@ -1885,82 +2560,14 @@
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_del(&cb->list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
-int hci_register_amp(struct amp_mgr_cb *cb)
-{
- BT_DBG("%p", cb);
-
- write_lock_bh(&_mgr_cb_list_lock);
- list_add(&cb->list, &_mgr_cb_list);
- write_unlock_bh(&_mgr_cb_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(hci_register_amp);
-
-int hci_unregister_amp(struct amp_mgr_cb *cb)
-{
- BT_DBG("%p", cb);
-
- write_lock_bh(&_mgr_cb_list_lock);
- list_del(&cb->list);
- write_unlock_bh(&_mgr_cb_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(hci_unregister_amp);
-
-void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
- struct sk_buff *skb)
-{
- struct amp_mgr_cb *cb;
-
- BT_DBG("opcode 0x%x", opcode);
-
- read_lock_bh(&_mgr_cb_list_lock);
- list_for_each_entry(cb, &_mgr_cb_list, list) {
- if (cb->amp_cmd_complete_event)
- cb->amp_cmd_complete_event(hdev, opcode, skb);
- }
- read_unlock_bh(&_mgr_cb_list_lock);
-}
-
-void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
-{
- struct amp_mgr_cb *cb;
-
- BT_DBG("opcode 0x%x, status %d", opcode, status);
-
- read_lock_bh(&_mgr_cb_list_lock);
- list_for_each_entry(cb, &_mgr_cb_list, list) {
- if (cb->amp_cmd_status_event)
- cb->amp_cmd_status_event(hdev, opcode, status);
- }
- read_unlock_bh(&_mgr_cb_list_lock);
-}
-
-void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
- struct sk_buff *skb)
-{
- struct amp_mgr_cb *cb;
-
- BT_DBG("ev_code 0x%x", ev_code);
-
- read_lock_bh(&_mgr_cb_list_lock);
- list_for_each_entry(cb, &_mgr_cb_list, list) {
- if (cb->amp_event)
- cb->amp_event(hdev, ev_code, skb);
- }
- read_unlock_bh(&_mgr_cb_list_lock);
-}
-
static int hci_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
@@ -1972,34 +2579,72 @@
BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
- if (atomic_read(&hdev->promisc)) {
- /* Time stamp */
- __net_timestamp(skb);
+ /* Time stamp */
+ __net_timestamp(skb);
- hci_send_to_sock(hdev, skb, NULL);
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
+
+ if (atomic_read(&hdev->promisc)) {
+ /* Send copy to the sockets */
+ hci_send_to_sock(hdev, skb);
}
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
- hci_notify(hdev, HCI_DEV_WRITE);
return hdev->send(skb);
}
-/* Send HCI command */
-int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
+{
+ skb_queue_head_init(&req->cmd_q);
+ req->hdev = hdev;
+ req->err = 0;
+}
+
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+
+ /* If an error occurred during request building, remove all HCI
+ * commands queued on the HCI request queue.
+ */
+ if (req->err) {
+ skb_queue_purge(&req->cmd_q);
+ return req->err;
+ }
+
+ /* Do not allow empty requests */
+ if (skb_queue_empty(&req->cmd_q))
+ return -ENODATA;
+
+ skb = skb_peek_tail(&req->cmd_q);
+ bt_cb(skb)->req.complete = complete;
+
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+
+ return 0;
+}
+
+static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
+ u32 plen, const void *param)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
struct sk_buff *skb;
- BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
-
skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb) {
- BT_ERR("%s no memory for command", hdev->name);
- return -ENOMEM;
- }
+ if (!skb)
+ return NULL;
hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
@@ -2013,15 +2658,70 @@
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
- if (test_bit(HCI_INIT, &hdev->flags))
- hdev->init_last_cmd = opcode;
+ return skb;
+}
+
+/* Send HCI command */
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
+ const void *param)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+ skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ if (!skb) {
+ BT_ERR("%s no memory for command", hdev->name);
+ return -ENOMEM;
+ }
+
+ * Stand-alone HCI commands must be flagged as
+ * single-command requests.
+ */
+ bt_cb(skb)->req.start = true;
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
-EXPORT_SYMBOL(hci_send_cmd);
+
+/* Queue a command to an asynchronous HCI request */
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+ /* If an error occurred during request building, there is no point in
+ * queueing the HCI command. We can simply return.
+ */
+ if (req->err)
+ return;
+
+ skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ if (!skb) {
+ BT_ERR("%s no memory for command (opcode 0x%4.4x)",
+ hdev->name, opcode);
+ req->err = -ENOMEM;
+ return;
+ }
+
+ if (skb_queue_empty(&req->cmd_q))
+ bt_cb(skb)->req.start = true;
+
+ bt_cb(skb)->req.event = event;
+
+ skb_queue_tail(&req->cmd_q, skb);
+}
+
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param)
+{
+ hci_req_add_ev(req, opcode, plen, param, 0);
+}
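
To show how the request machinery above fits together, a minimal sketch that batches one command and runs the request; the completion callback signature assumes the hci_req_complete_t typedef (hdev plus status) that this backport's hci_core.h is expected to provide:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void example_scan_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_write_scan_enable(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* Splices the batch onto hdev->cmd_q and kicks cmd_work; the
	 * callback runs once, after the final command completes. */
	return hci_req_run(&req, example_scan_complete);
}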
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
@@ -2036,7 +2736,7 @@
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
@@ -2054,27 +2754,36 @@
hdr->dlen = cpu_to_le16(len);
}
-void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
- struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
+ struct sk_buff *skb, __u16 flags)
{
+ struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
- BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
+ skb->len = skb_headlen(skb);
+ skb->data_len = 0;
- skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- if (hdev->dev_type == HCI_BREDR)
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
hci_add_acl_hdr(skb, conn->handle, flags);
- else
- hci_add_acl_hdr(skb, chan->ll_handle, flags);
+ break;
+ case HCI_AMP:
+ hci_add_acl_hdr(skb, chan->handle, flags);
+ break;
+ default:
+ BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+ return;
+ }
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
- skb_queue_tail(&conn->data_q, skb);
+ skb_queue_tail(queue, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -2082,10 +2791,11 @@
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock_bh(&conn->data_q.lock);
+ spin_lock(&queue->lock);
- __skb_queue_tail(&conn->data_q, skb);
- flags &= ~ACL_PB_MASK;
+ __skb_queue_tail(queue, skb);
+
+ flags &= ~ACL_START;
flags |= ACL_CONT;
do {
skb = list; list = list->next;
@@ -2096,15 +2806,25 @@
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
} while (list);
- spin_unlock_bh(&conn->data_q.lock);
+ spin_unlock(&queue->lock);
}
-
- tasklet_schedule(&hdev->tx_task);
}
-EXPORT_SYMBOL(hci_send_acl);
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+ struct hci_dev *hdev = chan->conn->hdev;
+
+ BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
+
+ skb->dev = (void *) hdev;
+
+ hci_queue_acl(chan, &chan->data_q, skb, flags);
+
+ queue_work(hdev->workqueue, &hdev->tx_work);
+}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2125,75 +2845,25 @@
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
-/* HCI ACL Connection scheduler */
-static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev,
- int *quote)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL;
- int num = 0, min = ~0, conn_num = 0;
- struct list_head *p;
-
- /* We don't have to lock device here. Connections are always
- * added and removed with TX task disabled. */
- list_for_each(p, &h->list) {
- struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
- if (c->type == ACL_LINK)
- conn_num++;
-
- if (skb_queue_empty(&c->data_q))
- continue;
-
- if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
- continue;
-
- num++;
-
- if (c->sent < min) {
- min = c->sent;
- conn = c;
- }
- }
-
- if (conn) {
- int cnt, q;
- cnt = hdev->acl_cnt;
- q = cnt / num;
- *quote = q ? q : 1;
- } else
- *quote = 0;
-
- if ((*quote == hdev->acl_cnt) &&
- (conn->sent == (hdev->acl_pkts - 1)) &&
- (conn_num > 1)) {
- *quote = 0;
- conn = NULL;
- }
-
- BT_DBG("conn %p quote %d", conn, *quote);
- return conn;
-}
/* HCI Connection scheduler */
-static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
+static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL;
- int num = 0, min = ~0;
- struct list_head *p;
+ struct hci_conn *conn = NULL, *c;
+ unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
- list_for_each(p, &h->list) {
- struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
@@ -2206,8 +2876,13 @@
min = c->sent;
conn = c;
}
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
}
+ rcu_read_unlock();
+
if (conn) {
int cnt, q;
@@ -2236,76 +2911,293 @@
return conn;
}
-static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
+static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
- struct hci_conn *c;
+ struct hci_conn *c;
BT_ERR("%s link tx timeout", hdev->name);
+ rcu_read_lock();
+
/* Kill stalled connections */
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
- BT_ERR("%s killing stalled connection %s",
- hdev->name, batostr(&c->dst));
- hci_acl_disconn(c, 0x13);
+ BT_ERR("%s killing stalled connection %pMR",
+ hdev->name, &c->dst);
+ hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
}
}
+
+ rcu_read_unlock();
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_chan *chan = NULL;
+ unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
- struct sk_buff *skb;
- int quote;
+ int cnt, q, conn_num = 0;
BT_DBG("%s", hdev->name);
+ rcu_read_lock();
+
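+ /* Descriptive note: pick the channel whose head-of-queue skb has the
+  * highest priority; among channels at that priority, prefer the
+  * connection with the fewest packets already in flight (conn->sent).
+  */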
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *tmp;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ conn_num++;
+
+ list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&tmp->data_q))
+ continue;
+
+ skb = skb_peek(&tmp->data_q);
+ if (skb->priority < cur_prio)
+ continue;
+
+ if (skb->priority > cur_prio) {
+ num = 0;
+ min = ~0;
+ cur_prio = skb->priority;
+ }
+
+ num++;
+
+ if (conn->sent < min) {
+ min = conn->sent;
+ chan = tmp;
+ }
+ }
+
+ if (hci_conn_num(hdev, type) == conn_num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ if (!chan)
+ return NULL;
+
+ switch (chan->conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case AMP_LINK:
+ cnt = hdev->block_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
+ *quote = q ? q : 1;
+ BT_DBG("chan %p quote %d", chan, *quote);
+ return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn;
+ int num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ rcu_read_lock();
+
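+ /* Descriptive note: channels that were serviced in the last TX round
+  * only get their sent counter cleared; channels that were skipped and
+  * still have queued data are promoted to HCI_PRIO_MAX - 1 so they are
+  * not starved by higher-priority traffic.
+  */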
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *chan;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ num++;
+
+ list_for_each_entry_rcu(chan, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (chan->sent) {
+ chan->sent = 0;
+ continue;
+ }
+
+ if (skb_queue_empty(&chan->data_q))
+ continue;
+
+ skb = skb_peek(&chan->data_q);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ continue;
+
+ skb->priority = HCI_PRIO_MAX - 1;
+
+ BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+ skb->priority);
+ }
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+}
+
+static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Calculate count of blocks used by this packet */
+ return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+}
+
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+{
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
- if (hdev->acl_cnt <= 0 &&
- time_after(jiffies, hdev->acl_last_tx + HZ * 45))
+ if (!cnt && time_after(jiffies, hdev->acl_last_tx +
+ HCI_ACL_TX_TIMEOUT))
hci_link_tx_to(hdev, ACL_LINK);
}
+}
- while (hdev->acl_cnt > 0 &&
- ((conn = hci_low_sent_acl(hdev, &quote)) != NULL)) {
+static void hci_sched_acl_pkt(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->acl_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
- while (quote > 0 &&
- (skb = skb_dequeue(&conn->data_q))) {
- int count = 1;
+ __check_timeout(hdev, cnt);
- BT_DBG("skb %p len %d", skb, skb->len);
+ while (hdev->acl_cnt &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
- if (hdev->flow_ctl_mode ==
- HCI_BLOCK_BASED_FLOW_CTL_MODE)
- /* Calculate count of blocks used by
- * this packet
- */
- count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
- hdev->data_block_len) + 1;
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
- if (count > hdev->acl_cnt)
- return;
+ skb = skb_dequeue(&chan->data_q);
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
- hdev->acl_cnt -= count;
- quote -= count;
-
- conn->sent += count;
+ hdev->acl_cnt--;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
+ if (cnt != hdev->acl_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
+}
+
+static void hci_sched_acl_blk(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->block_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+ u8 type;
+
+ __check_timeout(hdev, cnt);
+
+ BT_DBG("%s", hdev->name);
+
+ if (hdev->dev_type == HCI_AMP)
+ type = AMP_LINK;
+ else
+ type = ACL_LINK;
+
+ while (hdev->block_cnt > 0 &&
+ (chan = hci_chan_sent(hdev, type, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+ int blocks;
+
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ blocks = __get_blocks(hdev, skb);
+ if (blocks > hdev->block_cnt)
+ return;
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
+
+ hci_send_frame(skb);
+ hdev->acl_last_tx = jiffies;
+
+ hdev->block_cnt -= blocks;
+ quote -= blocks;
+
+ chan->sent += blocks;
+ chan->conn->sent += blocks;
+ }
+ }
+
+ if (cnt != hdev->block_cnt)
+ hci_prio_recalculate(hdev, type);
+}
+
+static void hci_sched_acl(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ /* No ACL link over BR/EDR controller */
+ if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
+ return;
+
+ /* No AMP link over AMP controller */
+ if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
+ return;
+
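+ /* Descriptive note: packet-based controllers account for TX credits
+  * per packet, while block-based (AMP) controllers account for them in
+  * data blocks of hdev->block_len bytes.
+  */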
+ switch (hdev->flow_ctl_mode) {
+ case HCI_FLOW_CTL_MODE_PACKET_BASED:
+ hci_sched_acl_pkt(hdev);
+ break;
+
+ case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+ hci_sched_acl_blk(hdev);
+ break;
+ }
}
/* Schedule SCO */
-static inline void hci_sched_sco(struct hci_dev *hdev)
+static void hci_sched_sco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -2313,6 +3205,9 @@
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, SCO_LINK))
+ return;
+
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2325,7 +3220,7 @@
}
}
-static inline void hci_sched_esco(struct hci_dev *hdev)
+static void hci_sched_esco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -2333,7 +3228,11 @@
BT_DBG("%s", hdev->name);
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
+ if (!hci_conn_num(hdev, ESCO_LINK))
+ return;
+
+ while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
+ "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
@@ -2345,49 +3244,64 @@
}
}
-static inline void hci_sched_le(struct hci_dev *hdev)
+static void hci_sched_le(struct hci_dev *hdev)
{
- struct hci_conn *conn;
+ struct hci_chan *chan;
struct sk_buff *skb;
- int quote, cnt;
+ int quote, cnt, tmp;
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, LE_LINK))
+ return;
+
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
- time_after(jiffies, hdev->le_last_tx + HZ * 45))
+ time_after(jiffies, hdev->le_last_tx + HZ * 45))
hci_link_tx_to(hdev, LE_LINK);
}
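+ /* Descriptive note: controllers without a dedicated LE buffer pool
+  * (le_pkts == 0) share the ACL buffers for LE traffic.
+  */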
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
- while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ tmp = cnt;
+ while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
hci_send_frame(skb);
hdev->le_last_tx = jiffies;
cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
if (hdev->le_pkts)
hdev->le_cnt = cnt;
else
hdev->acl_cnt = cnt;
+
+ if (cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
}
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
struct sk_buff *skb;
- read_lock(&hci_task_lock);
-
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
- hdev->sco_cnt, hdev->le_cnt);
+ hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
@@ -2402,14 +3316,12 @@
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
-
- read_unlock(&hci_task_lock);
}
-/* ----- HCI RX task (incoming data proccessing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
-static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -2421,7 +3333,8 @@
flags = hci_flags(handle);
handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
hdev->stat.acl_rx++;
@@ -2430,26 +3343,21 @@
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->recv_acldata) {
- hp->recv_acldata(conn, skb, flags);
- return;
- }
+ l2cap_recv_acldata(conn, skb, flags);
+ return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
}
/* SCO data packet */
-static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -2459,7 +3367,7 @@
handle = __le16_to_cpu(hdr->handle);
- BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
+ BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
hdev->stat.sco_rx++;
@@ -2468,35 +3376,122 @@
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->recv_scodata) {
- hp->recv_scodata(conn, skb);
- return;
- }
+ sco_recv_scodata(conn, skb);
+ return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
}
-static void hci_rx_task(unsigned long arg)
+static bool hci_req_is_complete(struct hci_dev *hdev)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct sk_buff *skb;
+
+ skb = skb_peek(&hdev->cmd_q);
+ if (!skb)
+ return true;
+
+ return bt_cb(skb)->req.start;
+}
+
+static void hci_resend_last(struct hci_dev *hdev)
+{
+ struct hci_command_hdr *sent;
+ struct sk_buff *skb;
+ u16 opcode;
+
+ if (!hdev->sent_cmd)
+ return;
+
+ sent = (void *) hdev->sent_cmd->data;
+ opcode = __le16_to_cpu(sent->opcode);
+ if (opcode == HCI_OP_RESET)
+ return;
+
+ skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+}
+
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
+{
+ hci_req_complete_t req_complete = NULL;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
+
+ /* If the completed command doesn't match the last one that was
+ * sent we need to do special handling of it.
+ */
+ if (!hci_sent_cmd_data(hdev, opcode)) {
+ /* Some CSR based controllers generate a spontaneous
+ * reset complete event during init and any pending
+ * command will never be completed. In such a case we
+ * need to resend whatever was the last sent
+ * command.
+ */
+ if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
+ hci_resend_last(hdev);
+
+ return;
+ }
+
+ /* If the command succeeded and there's still more commands in
+ * this request the request is not yet complete.
+ */
+ if (!status && !hci_req_is_complete(hdev))
+ return;
+
+ /* If this was the last command in a request the complete
+ * callback would be found in hdev->sent_cmd instead of the
+ * command queue (hdev->cmd_q).
+ */
+ if (hdev->sent_cmd) {
+ req_complete = bt_cb(hdev->sent_cmd)->req.complete;
+ if (req_complete)
+ goto call_complete;
+ }
+
+ /* Remove all pending commands belonging to this request */
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ while ((skb = __skb_dequeue(&hdev->cmd_q))) {
+ if (bt_cb(skb)->req.start) {
+ __skb_queue_head(&hdev->cmd_q, skb);
+ break;
+ }
+
+ req_complete = bt_cb(skb)->req.complete;
+ kfree_skb(skb);
+ }
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+call_complete:
+ if (req_complete)
+ req_complete(hdev, status);
+}
+
+static void hci_rx_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
- read_lock(&hci_task_lock);
-
while ((skb = skb_dequeue(&hdev->rx_q))) {
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
+
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
- hci_send_to_sock(hdev, skb, NULL);
+ hci_send_to_sock(hdev, skb);
}
if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -2517,6 +3512,7 @@
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
+ BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
break;
@@ -2535,16 +3531,15 @@
break;
}
}
-
- read_unlock(&hci_task_lock);
}
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
- BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
+ BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
+ atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
/* Send queued commands */
if (atomic_read(&hdev->cmd_cnt)) {
@@ -2558,14 +3553,56 @@
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
- mod_timer(&hdev->cmd_timer,
- jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
+ if (test_bit(HCI_RESET, &hdev->flags))
+ del_timer(&hdev->cmd_timer);
+ else
+ mod_timer(&hdev->cmd_timer,
+ jiffies + HCI_CMD_TIMEOUT);
} else {
skb_queue_head(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
}
-module_param(enable_smp, bool, 0644);
-MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EINPROGRESS;
+
+ inquiry_cache_flush(hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = length;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EALREADY;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
+
+u8 bdaddr_to_le(u8 bdaddr_type)
+{
+ switch (bdaddr_type) {
+ case BDADDR_LE_PUBLIC:
+ return ADDR_LE_DEV_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return ADDR_LE_DEV_RANDOM;
+ }
+}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5318889..287ecb0
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010-2013 The Linux Foundation. All rights reserved.
+ Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -24,26 +24,13 @@
/* Bluetooth HCI event handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
/* Handle HCI Event packets */
@@ -51,58 +38,54 @@
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ clear_bit(HCI_INQUIRY, &hdev->flags);
+ smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ wake_up_bit(&hdev->flags, HCI_INQUIRY);
+
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+
+ hci_conn_check_pending(hdev);
+}
+
+static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
- clear_bit(HCI_INQUIRY, &hdev->flags);
-
- hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
-
- hci_conn_check_pending(hdev);
+ set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
- clear_bit(HCI_INQUIRY, &hdev->flags);
+ clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
hci_conn_check_pending(hdev);
}
-static void hci_cc_link_key_reply(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_rp_link_key_reply *rp = (void *) skb->data;
- struct hci_conn *conn;
- struct hci_cp_link_key_reply *cp;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
- if (rp->status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_LINK_KEY_REPLY);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (conn) {
- hci_conn_hold(conn);
- memcpy(conn->link_key, cp->link_key, sizeof(conn->link_key));
- conn->key_type = 5;
- hci_conn_put(conn);
- }
- hci_dev_unlock(hdev);
-}
-
-static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
@@ -112,7 +95,7 @@
struct hci_rp_role_discovery *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -135,7 +118,7 @@
struct hci_rp_read_link_policy *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -155,7 +138,7 @@
struct hci_conn *conn;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -173,11 +156,12 @@
hci_dev_unlock(hdev);
}
-static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -185,12 +169,13 @@
hdev->link_policy = __le16_to_cpu(rp->policy);
}
-static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
if (!sent)
@@ -198,19 +183,25 @@
if (!status)
hdev->link_policy = get_unaligned_le16(sent);
-
- hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
clear_bit(HCI_RESET, &hdev->flags);
- hci_req_complete(hdev, HCI_OP_RESET, status);
+ /* Reset all non-persistent flags */
+ hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
+
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+ hdev->adv_tx_power = HCI_TX_POWER_INVALID;
+
+ memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
+ hdev->adv_data_len = 0;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -218,17 +209,19 @@
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
return;
+
hci_dev_lock(hdev);
- if (!status)
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_set_local_name_complete(hdev, sent, status);
+ else if (!status)
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_set_local_name_complete(hdev->id, sent, status);
hci_dev_unlock(hdev);
}
@@ -236,12 +229,13 @@
{
struct hci_rp_read_local_name *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
+ if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -249,7 +243,7 @@
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
if (!sent)
@@ -264,7 +258,8 @@
clear_bit(HCI_AUTH, &hdev->flags);
}
- hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_auth_enable_complete(hdev, status);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -272,7 +267,7 @@
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
if (!sent)
@@ -286,53 +281,61 @@
else
clear_bit(HCI_ENCRYPT, &hdev->flags);
}
-
- hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ __u8 param, status = *((__u8 *) skb->data);
+ int old_pscan, old_iscan;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
if (!sent)
return;
- if (!status) {
- __u8 param = *((__u8 *) sent);
- int old_pscan, old_iscan;
- hci_dev_lock(hdev);
+ param = *((__u8 *) sent);
- old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
- old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+ hci_dev_lock(hdev);
- if (param & SCAN_INQUIRY) {
- set_bit(HCI_ISCAN, &hdev->flags);
- if (!old_iscan)
- mgmt_discoverable(hdev->id, 1);
- } else if (old_iscan)
- mgmt_discoverable(hdev->id, 0);
-
- if (param & SCAN_PAGE) {
- set_bit(HCI_PSCAN, &hdev->flags);
- if (!old_pscan)
- mgmt_connectable(hdev->id, 1);
- } else if (old_pscan)
- mgmt_connectable(hdev->id, 0);
- hci_dev_unlock(hdev);
+ if (status) {
+ mgmt_write_scan_failed(hdev, param, status);
+ hdev->discov_timeout = 0;
+ goto done;
}
- hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+ if (param & SCAN_INQUIRY) {
+ set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev, 1);
+ if (hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+ } else if (old_iscan)
+ mgmt_discoverable(hdev, 0);
+
+ if (param & SCAN_PAGE) {
+ set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev, 0);
+
+done:
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -340,7 +343,7 @@
memcpy(hdev->dev_class, rp->dev_class, 3);
BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -348,16 +351,21 @@
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- if (status)
- return;
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
return;
- memcpy(hdev->dev_class, sent, 3);
+ hci_dev_lock(hdev);
+
+ if (status == 0)
+ memcpy(hdev->dev_class, sent, 3);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_set_class_of_dev_complete(hdev, sent, status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -365,7 +373,7 @@
struct hci_rp_read_voice_setting *rp = (void *) skb->data;
__u16 setting;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -377,22 +385,20 @@
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
-static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_voice_setting(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
__u16 setting;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -408,177 +414,45 @@
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
-}
-
-static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
-{
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
-}
-
-static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- hci_req_complete(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, status);
-}
-
-static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
- if (rp->status)
- return;
-
- hdev->ssp_mode = rp->mode;
}
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- void *sent;
+ struct hci_cp_write_ssp_mode *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- if (status)
- return;
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
return;
- hdev->ssp_mode = *((__u8 *) sent);
-}
-
-static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
-{
- if (hdev->features[6] & LMP_EXT_INQ)
- return 2;
-
- if (hdev->features[3] & LMP_RSSI_INQ)
- return 1;
-
- if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
- hdev->lmp_subver == 0x0757)
- return 1;
-
- if (hdev->manufacturer == 15) {
- if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
- return 1;
- if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
- return 1;
- if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
- return 1;
+ if (!status) {
+ if (sent->mode)
+ hdev->features[1][0] |= LMP_HOST_SSP;
+ else
+ hdev->features[1][0] &= ~LMP_HOST_SSP;
}
- if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
- hdev->lmp_subver == 0x1805)
- return 1;
-
- return 0;
-}
-
-static void hci_setup_inquiry_mode(struct hci_dev *hdev)
-{
- u8 mode;
-
- mode = hci_get_inquiry_mode(hdev);
-
- hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
-}
-
-static void hci_setup_event_mask(struct hci_dev *hdev)
-{
- /* The second byte is 0xff instead of 0x9f (two reserved bits
- * disabled) since a Broadcom 1.2 dongle doesn't respond to the
- * command otherwise */
- u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
-
- BT_DBG("");
-
- /* Events for 1.2 and newer controllers */
- if (hdev->lmp_ver > 1) {
- events[4] |= 0x01; /* Flow Specification Complete */
- events[4] |= 0x02; /* Inquiry Result with RSSI */
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_ssp_enable_complete(hdev, sent->mode, status);
+ else if (!status) {
+ if (sent->mode)
+ set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ else
+ clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
}
-
- if (hdev->features[3] & LMP_RSSI_INQ)
- events[4] |= 0x04; /* Inquiry Result with RSSI */
-
- if (hdev->features[5] & LMP_SNIFF_SUBR)
- events[5] |= 0x20; /* Sniff Subrating */
-
- if (hdev->features[5] & LMP_PAUSE_ENC)
- events[5] |= 0x80; /* Encryption Key Refresh Complete */
-
- if (hdev->features[6] & LMP_EXT_INQ)
- events[5] |= 0x40; /* Extended Inquiry Result */
-
- if (hdev->features[6] & LMP_NO_FLUSH)
- events[7] |= 0x01; /* Enhanced Flush Complete */
-
- if (hdev->features[7] & LMP_LSTO)
- events[6] |= 0x80; /* Link Supervision Timeout Changed */
-
- if (hdev->features[6] & LMP_SIMPLE_PAIR) {
- events[6] |= 0x01; /* IO Capability Request */
- events[6] |= 0x02; /* IO Capability Response */
- events[6] |= 0x04; /* User Confirmation Request */
- events[6] |= 0x08; /* User Passkey Request */
- events[6] |= 0x10; /* Remote OOB Data Request */
- events[6] |= 0x20; /* Simple Pairing Complete */
- events[7] |= 0x04; /* User Passkey Notification */
- events[7] |= 0x08; /* Keypress Notification */
- events[7] |= 0x10; /* Remote Host Supported
- * Features Notification */
- }
-
- if (hdev->features[4] & LMP_LE)
- events[7] |= 0x20; /* LE Meta-Event */
-
- hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
-}
-
-static void hci_setup(struct hci_dev *hdev)
-{
- if (hdev->lmp_ver > 1)
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
-
- if (hdev->features[6] & LMP_SIMPLE_PAIR) {
- u8 mode = 0x01;
- hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
- }
-
- if (hdev->features[3] & LMP_RSSI_INQ)
- hci_setup_inquiry_mode(hdev);
-
- if (hdev->features[7] & LMP_INQ_TX_PWR)
- hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -589,183 +463,197 @@
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
- BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
- hdev->manufacturer,
- hdev->hci_ver, hdev->hci_rev);
-
- if (hdev->dev_type == HCI_BREDR && test_bit(HCI_INIT, &hdev->flags))
- hci_setup(hdev);
+ BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
+ hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
}
-static void hci_setup_link_policy(struct hci_dev *hdev)
-{
- u16 link_policy = 0;
-
- if (hdev->features[0] & LMP_RSWITCH)
- link_policy |= HCI_LP_RSWITCH;
- if (hdev->features[0] & LMP_HOLD)
- link_policy |= HCI_LP_HOLD;
- if (hdev->features[0] & LMP_SNIFF)
- link_policy |= HCI_LP_SNIFF;
- if (hdev->features[1] & LMP_PARK)
- link_policy |= HCI_LP_PARK;
-
- link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
- sizeof(link_policy), &link_policy);
-}
-
-static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_commands(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_commands *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- if (rp->status)
- goto done;
-
- memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
-
- if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
- hci_setup_link_policy(hdev);
-
-done:
- hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
+ if (!rp->status)
+ memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
-static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
memcpy(hdev->features, rp->features, 8);
- if (hdev->dev_type == HCI_BREDR && test_bit(HCI_INIT, &hdev->flags)) {
- if (hdev->features[6] & LMP_SIMPLE_PAIR) {
- u8 mode = 0x01;
- hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
- sizeof(mode), &mode);
- }
-
- if (hdev->features[3] & LMP_RSSI_INQ)
- hci_setup_inquiry_mode(hdev);
-
- if (hdev->features[7] & LMP_INQ_TX_PWR)
- hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
- 0, NULL);
-
- hci_setup_event_mask(hdev);
- }
-
/* Adjust default settings according to features
* supported by device. */
- if (hdev->features[0] & LMP_3SLOT)
+ if (hdev->features[0][0] & LMP_3SLOT)
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
- if (hdev->features[0] & LMP_5SLOT)
+ if (hdev->features[0][0] & LMP_5SLOT)
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
- if (hdev->features[1] & LMP_HV2) {
+ if (hdev->features[0][1] & LMP_HV2) {
hdev->pkt_type |= (HCI_HV2);
hdev->esco_type |= (ESCO_HV2);
}
- if (hdev->features[1] & LMP_HV3) {
+ if (hdev->features[0][1] & LMP_HV3) {
hdev->pkt_type |= (HCI_HV3);
hdev->esco_type |= (ESCO_HV3);
}
- if (hdev->features[3] & LMP_ESCO)
+ if (lmp_esco_capable(hdev))
hdev->esco_type |= (ESCO_EV3);
- if (hdev->features[4] & LMP_EV4)
+ if (hdev->features[0][4] & LMP_EV4)
hdev->esco_type |= (ESCO_EV4);
- if (hdev->features[4] & LMP_EV5)
+ if (hdev->features[0][4] & LMP_EV5)
hdev->esco_type |= (ESCO_EV5);
- if (hdev->features[5] & LMP_EDR_ESCO_2M)
+ if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
hdev->esco_type |= (ESCO_2EV3);
- if (hdev->features[5] & LMP_EDR_ESCO_3M)
+ if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
hdev->esco_type |= (ESCO_3EV3);
- if (hdev->features[5] & LMP_EDR_3S_ESCO)
+ if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ hdev->features[0][0], hdev->features[0][1],
+ hdev->features[0][2], hdev->features[0][3],
+ hdev->features[0][4], hdev->features[0][5],
+ hdev->features[0][6], hdev->features[0][7]);
}
-static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+ struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- hdev->flow_ctl_mode = rp->mode;
+ hdev->max_page = rp->max_page;
+
+ if (rp->page < HCI_MAX_PAGES)
+ memcpy(hdev->features[rp->page], rp->features, 8);
+}
+
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->flow_ctl_mode = rp->mode;
}
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- if (hdev->flow_ctl_mode == HCI_PACKET_BASED_FLOW_CTL_MODE) {
- hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
- hdev->sco_mtu = rp->sco_mtu;
- hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
- hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
- hdev->acl_cnt = hdev->acl_pkts;
- hdev->sco_cnt = hdev->sco_pkts;
- }
+ hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
+ hdev->sco_mtu = rp->sco_mtu;
+ hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
+ hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
hdev->sco_mtu = 64;
hdev->sco_pkts = 8;
}
+ hdev->acl_cnt = hdev->acl_pkts;
+ hdev->sco_cnt = hdev->sco_pkts;
- BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
- hdev->acl_mtu, hdev->acl_pkts,
- hdev->sco_mtu, hdev->sco_pkts);
+ BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
+ hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_bd_addr *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (!rp->status)
bacpy(&hdev->bdaddr, &rp->bdaddr);
-
- hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}
-static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
+ if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
+ hdev->page_scan_interval = __le16_to_cpu(rp->interval);
+ hdev->page_scan_window = __le16_to_cpu(rp->window);
+ }
+}
+
+static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ u8 status = *((u8 *) skb->data);
+ struct hci_cp_write_page_scan_activity *sent;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
+ if (!sent)
+ return;
+
+ hdev->page_scan_interval = __le16_to_cpu(sent->interval);
+ hdev->page_scan_window = __le16_to_cpu(sent->window);
+}
+
+static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
+ hdev->page_scan_type = rp->type;
+}
+
+static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ u8 status = *((u8 *) skb->data);
+ u8 *type;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
+ if (type)
+ hdev->page_scan_type = *type;
}
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
@@ -773,35 +661,30 @@
{
struct hci_rp_read_data_block_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- if (hdev->flow_ctl_mode == HCI_BLOCK_BASED_FLOW_CTL_MODE) {
- hdev->acl_mtu = __le16_to_cpu(rp->max_acl_len);
- hdev->sco_mtu = 0;
- hdev->data_block_len = __le16_to_cpu(rp->data_block_len);
- /* acl_pkts indicates the number of blocks */
- hdev->acl_pkts = __le16_to_cpu(rp->num_blocks);
- hdev->sco_pkts = 0;
- hdev->acl_cnt = hdev->acl_pkts;
- hdev->sco_cnt = 0;
- }
+ hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+ hdev->block_len = __le16_to_cpu(rp->block_len);
+ hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
- BT_DBG("%s acl mtu %d:%d, data block len %d", hdev->name,
- hdev->acl_mtu, hdev->acl_cnt, hdev->data_block_len);
+ hdev->block_cnt = hdev->num_blocks;
+
+ BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+ hdev->block_cnt, hdev->block_len);
}
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
- return;
+ goto a2mp_rsp;
hdev->amp_status = rp->amp_status;
hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -814,55 +697,56 @@
hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
- hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+a2mp_rsp:
+ a2mp_send_getinfo_rsp(hdev);
}
-static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
+ struct amp_assoc *assoc = &hdev->loc_assoc;
+ size_t rem_len, frag_len;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
-}
+ if (rp->status)
+ goto a2mp_rsp;
-static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
-{
- __u8 status = *((__u8 *) skb->data);
+ frag_len = skb->len - sizeof(*rp);
+ rem_len = __le16_to_cpu(rp->rem_len);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ if (rem_len > frag_len) {
+ BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
- hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
-}
+ memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+ assoc->offset += frag_len;
-static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- __u8 status = *((__u8 *) skb->data);
+ /* Read other fragments */
+ amp_read_loc_assoc_frag(hdev, rp->phy_handle);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ return;
+ }
- hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
+ memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+ assoc->len = assoc->offset + rem_len;
+ assoc->offset = 0;
+
+a2mp_rsp:
+ /* Send A2MP Rsp when all fragments are received */
+ a2mp_send_getampassoc_rsp(hdev, rp->status);
+ a2mp_send_create_phy_link_req(hdev, rp->status);
}
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
-}
-
-static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
+ if (!rp->status)
+ hdev->inq_tx_power = rp->tx_power;
}
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -871,13 +755,14 @@
struct hci_cp_pin_code_reply *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
- if (rp->status != 0)
+ if (rp->status)
goto unlock;
cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
@@ -887,6 +772,7 @@
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (conn)
conn->pin_length = cp->pin_len;
+
unlock:
hci_dev_unlock(hdev);
}
@@ -895,20 +781,23 @@
{
struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
- rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
+ rp->status);
+
hci_dev_unlock(hdev);
}
+
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -919,151 +808,295 @@
hdev->le_cnt = hdev->le_pkts;
BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
-
- hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
-static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_read_local_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
+ struct hci_rp_le_read_local_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- if (rp->status)
- return;
+ if (!rp->status)
+ memcpy(hdev->le_features, rp->features, 8);
+}
- hdev->le_white_list_size = rp->size;
+static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
- BT_DBG("%s le white list %d", hdev->name, hdev->le_white_list_size);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
+ if (!rp->status)
+ hdev->adv_tx_power = rp->tx_power;
}
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
- rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
+ rp->status);
+
hci_dev_unlock(hdev);
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
- rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
+ ACL_LINK, 0, rp->status);
+
hci_dev_unlock(hdev);
}
-static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_conn *conn;
- struct hci_rp_read_rssi *rp = (void *) skb->data;
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- BT_DBG("%s rssi : %d handle : %d", hdev->name, rp->rssi, rp->handle);
+ hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
- if (conn)
- mgmt_read_rssi_complete(hdev->id, rp->rssi, &conn->dst,
- __le16_to_cpu(rp->handle), rp->status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
+ 0, rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
+ ACL_LINK, 0, rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
- hci_dev_lock(hdev);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+ hci_dev_lock(hdev);
+ mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
rp->randomizer, rp->status);
hci_dev_unlock(hdev);
}
-static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
+ __u8 *sent, status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
- if (rp->status)
- return;
-
- hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
-}
-
-static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
-
- if (rp->status)
- return;
-
- hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
-}
-
-static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- void *sent;
- __u8 param_scan_enable;
- __u8 status = *((__u8 *) skb->data);
-
- if (status)
- return;
-
- sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
if (!sent)
return;
- param_scan_enable = *((__u8 *) sent);
- if (param_scan_enable == 0x01) {
- del_timer(&hdev->adv_timer);
- } else if (param_scan_enable == 0x00) {
- mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+ hci_dev_lock(hdev);
+
+ if (!status) {
+ if (*sent)
+ set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
+ else
+ clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
}
+
+ if (!test_bit(HCI_INIT, &hdev->flags)) {
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+ hci_update_ad(&req);
+ hci_req_run(&req, NULL);
+ }
+
+ hci_dev_unlock(hdev);
}
-static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
- BT_DBG("%s status 0x%x", hdev->name, status);
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status) {
- hci_req_complete(hdev, HCI_OP_INQUIRY, status);
-
- hci_conn_check_pending(hdev);
- } else {
- set_bit(HCI_INQUIRY, &hdev->flags);
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_inquiry_started(hdev->id);
+ mgmt_start_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
+ return;
}
}
-static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_set_scan_enable *cp;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+ if (!cp)
+ return;
+
+ switch (cp->enable) {
+ case LE_SCAN_ENABLE:
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_dev_unlock(hdev);
+ break;
+
+ case LE_SCAN_DISABLE:
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ hdev->discovery.state == DISCOVERY_FINDING) {
+ mgmt_interleaved_discovery(hdev);
+ } else {
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+ }
+
+ break;
+
+ default:
+ BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
+ break;
+ }
+}
+
+static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
+
+ if (!rp->status)
+ hdev->le_white_list_size = rp->size;
+}
+
+static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ memcpy(hdev->le_states, rp->le_states, 8);
+}
+
+static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_write_le_host_supported *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
+ if (!sent)
+ return;
+
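+ /* Descriptive note: mirror the host-negotiated LE and simultaneous
+  * LE/BR-EDR support bits into the locally cached extended features
+  * (page 1).
+  */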
+ if (!status) {
+ if (sent->le)
+ hdev->features[1][0] |= LMP_HOST_LE;
+ else
+ hdev->features[1][0] &= ~LMP_HOST_LE;
+
+ if (sent->simul)
+ hdev->features[1][0] |= LMP_HOST_LE_BREDR;
+ else
+ hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_bit(HCI_INIT, &hdev->flags))
+ mgmt_le_enable_complete(hdev, sent->le, status);
+}
+
+static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+ hdev->name, rp->status, rp->phy_handle);
+
+ if (rp->status)
+ return;
+
+ amp_write_rem_assoc_continue(hdev, rp->phy_handle);
+}
+
+static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+{
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status) {
+ hci_conn_check_pending(hdev);
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ set_bit(HCI_INQUIRY, &hdev->flags);
+
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
if (!cp)
@@ -1073,7 +1106,7 @@
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
+ BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -1088,7 +1121,7 @@
if (!conn) {
conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
if (conn) {
- conn->out = 1;
+ conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
} else
BT_ERR("No memory for new connection");
@@ -1104,7 +1137,7 @@
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1115,7 +1148,7 @@
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
@@ -1138,7 +1171,10 @@
struct hci_cp_auth_requested *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (!status)
+ return;
cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
if (!cp)
@@ -1148,27 +1184,10 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- if (status) {
- mgmt_auth_failed(hdev->id, &conn->dst, status);
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
-
- if (conn->state == BT_CONFIG) {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
- } else {
- hci_auth_cfm(conn, status);
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_put(conn);
- }
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
- hci_encrypt_cfm(conn, status, 0x00);
- }
+ if (conn->state == BT_CONFIG) {
+ hci_proto_connect_cfm(conn, status);
+ hci_conn_drop(conn);
}
- conn->auth_initiator = 1;
}
hci_dev_unlock(hdev);
@@ -1179,7 +1198,7 @@
struct hci_cp_set_conn_encrypt *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1194,7 +1213,7 @@
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -1202,7 +1221,7 @@
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
@@ -1211,21 +1230,98 @@
return 0;
/* Only request authentication for SSP connections or non-SSP
- * devices with sec_level >= BT_SECURITY_MEDIUM*/
- BT_DBG("Pending sec level is %d", conn->pending_sec_level);
- if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
- conn->pending_sec_level < BT_SECURITY_MEDIUM)
+ * devices with sec_level HIGH or if MITM protection is requested */
+ if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
+ conn->pending_sec_level != BT_SECURITY_HIGH)
return 0;
return 1;
}
+static int hci_resolve_name(struct hci_dev *hdev,
+ struct inquiry_entry *e)
+{
+ struct hci_cp_remote_name_req cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ cp.pscan_rep_mode = e->data.pscan_rep_mode;
+ cp.pscan_mode = e->data.pscan_mode;
+ cp.clock_offset = e->data.clock_offset;
+
+ return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
+}
+
+static bool hci_resolve_next_name(struct hci_dev *hdev)
+{
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ if (list_empty(&discov->resolve))
+ return false;
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (!e)
+ return false;
+
+ if (hci_resolve_name(hdev, e) == 0) {
+ e->name_state = NAME_PENDING;
+ return true;
+ }
+
+ return false;
+}
+
+static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
+ bdaddr_t *bdaddr, u8 *name, u8 name_len)
+{
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
+ name_len, conn->dev_class);
+
+ if (discov->state == DISCOVERY_STOPPED)
+ return;
+
+ if (discov->state == DISCOVERY_STOPPING)
+ goto discov_complete;
+
+ if (discov->state != DISCOVERY_RESOLVING)
+ return;
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
+ /* If the device was not found in the list of devices whose names
+ * are pending resolution, there is no need to continue resolving
+ * the next name; that will be done upon receiving another Remote
+ * Name Request Complete event */
+ if (!e)
+ return;
+
+ list_del(&e->list);
+ if (name) {
+ e->name_state = NAME_KNOWN;
+ mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
+ e->data.rssi, name, name_len);
+ } else {
+ e->name_state = NAME_NOT_KNOWN;
+ }
+
+ if (hci_resolve_next_name(hdev))
+ return;
+
+discov_complete:
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+}
+
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_remote_name_req *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* If successful, wait for the name req complete event before
* checking for the need to do authentication */
@@ -1239,12 +1335,23 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
+
+ if (!conn)
+ goto unlock;
+
+ if (!hci_outgoing_auth_needed(hdev, conn))
+ goto unlock;
+
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -1253,7 +1360,7 @@
struct hci_cp_read_remote_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1268,7 +1375,7 @@
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -1280,7 +1387,7 @@
struct hci_cp_read_remote_ext_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1295,7 +1402,7 @@
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -1308,7 +1415,7 @@
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1319,7 +1426,7 @@
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
@@ -1342,7 +1449,7 @@
struct hci_cp_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1355,9 +1462,9 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, status);
}
@@ -1369,7 +1476,7 @@
struct hci_cp_exit_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1382,236 +1489,147 @@
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, status);
}
hci_dev_unlock(hdev);
}
-static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
+static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
- struct hci_cp_le_create_conn *cp;
- struct hci_conn *conn;
- unsigned long exp = msecs_to_jiffies(5000);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
-
- BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
- conn);
-
- if (status) {
- if (conn && conn->state == BT_CONNECT) {
- conn->state = BT_CLOSED;
- hci_proto_connect_cfm(conn, status);
- hci_conn_del(conn);
- }
- } else {
- if (!conn) {
- conn = hci_le_conn_add(hdev, &cp->peer_addr,
- cp->peer_addr_type);
- if (conn)
- conn->out = 1;
- else
- BT_ERR("No memory for new connection");
- } else
- exp = msecs_to_jiffies(conn->conn_timeout * 1000);
-
- if (conn && exp)
- mod_timer(&conn->disc_timer, jiffies + exp);
- }
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_logical_link(struct hci_dev *hdev, __u8 status)
-{
- struct hci_cp_create_logical_link *ap;
- struct hci_chan *chan;
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- ap = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_LOGICAL_LINK);
- if (!ap)
- return;
-
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_id(hdev, ap->phy_handle);
-
- BT_DBG("%s chan %p", hdev->name, chan);
-
- if (status) {
- if (chan && chan->state == BT_CONNECT) {
- chan->state = BT_CLOSED;
- hci_proto_create_cfm(chan, status);
- }
- } else if (chan) {
- chan->state = BT_CONNECT2;
- }
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_create_logical_link(struct hci_dev *hdev, __u8 status)
-{
- struct hci_cp_create_logical_link *cp;
- struct hci_chan *chan;
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_LOGICAL_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_id(hdev, cp->phy_handle);
-
- BT_DBG("%s chan %p", hdev->name, chan);
-
- if (status) {
- if (chan && chan->state == BT_CONNECT) {
- chan->state = BT_CLOSED;
- hci_proto_create_cfm(chan, status);
- }
- } else if (chan)
- chan->state = BT_CONNECT2;
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_flow_spec_modify(struct hci_dev *hdev, __u8 status)
-{
- struct hci_cp_flow_spec_modify *cp;
- struct hci_chan *chan;
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_FLOW_SPEC_MODIFY);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_handle(hdev, cp->log_handle);
- if (chan) {
- if (status)
- hci_proto_modify_cfm(chan, status);
- else {
- chan->tx_fs = cp->tx_fs;
- chan->rx_fs = cp->rx_fs;
- }
- }
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_disconn_logical_link(struct hci_dev *hdev, __u8 status)
-{
- struct hci_cp_disconn_logical_link *cp;
- struct hci_chan *chan;
-
- if (!status)
- return;
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONN_LOGICAL_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_handle(hdev, cp->log_handle);
- if (chan)
- hci_chan_del(chan);
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_disconn_physical_link(struct hci_dev *hdev, __u8 status)
-{
- struct hci_cp_disconn_phys_link *cp;
+ struct hci_cp_disconnect *cp;
struct hci_conn *conn;
if (!status)
return;
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONN_PHYS_LINK);
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
if (!cp)
return;
hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (conn) {
- conn->state = BT_CLOSED;
- hci_conn_del(conn);
- }
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
-{
- struct hci_cp_le_start_enc *cp;
- struct hci_conn *conn;
-
- BT_DBG("%s status 0x%x", hdev->name, status);
- if (!status) {
- return;
- }
-
- BT_DBG("%s Le start enc failed 0x%x", hdev->name, status);
- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
- if (!cp) {
- BT_DBG("CP is null");
- return;
- }
- hci_dev_lock(hdev);
-
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
- if (conn) {
- BT_DBG("conn exists");
- hci_conn_put(conn);
- }
+ if (conn)
+ mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, status);
+
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
- if (!hdev->disco_state)
- clear_bit(HCI_INQUIRY, &hdev->flags);
+ if (status) {
+ hci_dev_lock(hdev);
- hci_req_complete(hdev, HCI_OP_INQUIRY, status);
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (!conn) {
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
+
+ conn->state = BT_CLOSED;
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, status);
+ hci_proto_connect_cfm(conn, status);
+ hci_conn_del(conn);
+
+ hci_dev_unlock(hdev);
+ }
+}
+
+static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_create_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+ if (!cp)
+ return;
+
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_inquiry_complete_evt(hdev->id, status);
- hci_dev_unlock(hdev);
+ if (status) {
+ struct hci_conn *hcon;
- if (!lmp_le_capable(hdev))
- hci_conn_check_pending(hdev);
+ hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+ if (hcon)
+ hci_conn_del(hcon);
+ } else {
+ amp_write_remote_assoc(hdev, cp->phy_handle);
+ }
+
+ hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_accept_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+ if (!cp)
+ return;
+
+ amp_write_remote_assoc(hdev, cp->phy_handle);
+}
+
+static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ hci_conn_check_pending(hdev);
+
+ if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ return;
+
+ smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ wake_up_bit(&hdev->flags, HCI_INQUIRY);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (discov->state != DISCOVERY_FINDING)
+ goto unlock;
+
+ if (list_empty(&discov->resolve)) {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (e && hci_resolve_name(hdev, e) == 0) {
+ e->name_state = NAME_PENDING;
+ hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
+ } else {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1622,9 +1640,14 @@
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
+ bool name_known, ssp;
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -1633,15 +1656,17 @@
data.clock_offset = info->clock_offset;
data.rssi = 0x00;
data.ssp_mode = 0x00;
- hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
- info->dev_class, 0, 0, NULL);
+
+ name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, 0, !name_known, ssp, NULL,
+ 0);
}
hci_dev_unlock(hdev);
}
-static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1668,16 +1693,15 @@
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr, 0);
- } else if (conn->type == LE_LINK) {
- conn->state = BT_CONNECTED;
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr, 1);
+
+ if (!conn->out && !hci_conn_ssp_enabled(conn) &&
+ !hci_find_link_key(hdev, &ev->bdaddr))
+ conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+ else
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
} else
conn->state = BT_CONNECTED;
- hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
@@ -1686,28 +1710,27 @@
if (test_bit(HCI_ENCRYPT, &hdev->flags))
conn->link_mode |= HCI_LM_ENCRYPT;
- /* Get remote version */
+ /* Get remote features */
if (conn->type == ACL_LINK) {
- struct hci_cp_read_remote_version cp;
+ struct hci_cp_read_remote_features cp;
cp.handle = ev->handle;
- hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
- sizeof(cp), &cp);
- hci_send_cmd(hdev, HCI_OP_READ_REMOTE_VERSION,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
+ sizeof(cp), &cp);
}
/* Set packet type for incoming connection */
- if (!conn->out && hdev->hci_ver < 3) {
+ if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
+ &cp);
}
} else {
conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK || conn->type == LE_LINK)
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ if (conn->type == ACL_LINK)
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, ev->status);
}
if (conn->type == ACL_LINK)
@@ -1734,18 +1757,20 @@
return false;
}
-static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
int mask = hdev->link_mode;
+ __u8 flags = 0;
- BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
- batostr(&ev->bdaddr), ev->link_type);
+ BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
+ ev->link_type);
- mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
+ mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
+ &flags);
if ((mask & HCI_LM_ACCEPT) &&
- !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1756,7 +1781,8 @@
if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
- conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ &ev->bdaddr);
if (!conn) {
/* pkt_type not yet used for incoming connections */
conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
@@ -1768,14 +1794,13 @@
}
memcpy(conn->dev_class, ev->dev_class, 3);
- /* For incoming connection update remote class to userspace */
- mgmt_remote_class(hdev->id, &ev->bdaddr, ev->dev_class);
- conn->state = BT_CONNECT;
hci_dev_unlock(hdev);
- if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
+ if (ev->link_type == ACL_LINK ||
+ (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
struct hci_cp_accept_conn_req cp;
+ conn->state = BT_CONNECT;
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -1785,46 +1810,59 @@
else
cp.role = 0x01; /* Remain slave */
- hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
- sizeof(cp), &cp);
- } else {
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
+ &cp);
+ } else if (!(flags & HCI_PROTO_DEFER)) {
struct hci_cp_accept_sync_conn_req cp;
+ conn->state = BT_CONNECT;
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0x000A);
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0x01;
+ cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
+ } else {
+ conn->state = BT_CONNECT2;
+ hci_proto_connect_cfm(conn, 0);
}
} else {
/* Connection rejected */
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x0f;
+ cp.reason = HCI_ERROR_REJ_BAD_ADDR;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
-static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_to_mgmt_reason(u8 err)
+{
+ switch (err) {
+ case HCI_ERROR_CONNECTION_TIMEOUT:
+ return MGMT_DEV_DISCONN_TIMEOUT;
+ case HCI_ERROR_REMOTE_USER_TERM:
+ case HCI_ERROR_REMOTE_LOW_RESOURCES:
+ case HCI_ERROR_REMOTE_POWER_OFF:
+ return MGMT_DEV_DISCONN_REMOTE;
+ case HCI_ERROR_LOCAL_HOST_TERM:
+ return MGMT_DEV_DISCONN_LOCAL_HOST;
+ default:
+ return MGMT_DEV_DISCONN_UNKNOWN;
+ }
+}
+
+static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_disconn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d reason %d", hdev->name, ev->status, ev->reason);
-
- if (ev->status) {
- hci_dev_lock(hdev);
- mgmt_disconnect_failed(hdev->id);
- hci_dev_unlock(hdev);
- return;
- }
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1832,113 +1870,100 @@
if (!conn)
goto unlock;
- conn->state = BT_CLOSED;
+ if (ev->status == 0)
+ conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK || conn->type == LE_LINK)
- mgmt_disconnected(hdev->id, &conn->dst, ev->reason);
+ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
+ (conn->type == ACL_LINK || conn->type == LE_LINK)) {
+ if (ev->status) {
+ mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
+ } else {
+ u8 reason = hci_to_mgmt_reason(ev->reason);
- if (conn->type == LE_LINK)
- del_timer(&conn->smp_timer);
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type,
+ conn->dst_type, reason);
+ }
+ }
- hci_proto_disconn_cfm(conn, ev->reason, 0);
- hci_conn_del(conn);
+ if (ev->status == 0) {
+ if (conn->type == ACL_LINK && conn->flush_key)
+ hci_remove_link_key(hdev, &conn->dst);
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+ }
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_auth_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- if (ev->status == 0x06 && hdev->ssp_mode > 0 &&
- conn->ssp_mode > 0) {
- struct hci_cp_auth_requested cp;
- hci_remove_link_key(hdev, &conn->dst);
- cp.handle = cpu_to_le16(conn->handle);
- hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
- sizeof(cp), &cp);
- hci_dev_unlock(hdev);
- BT_INFO("Pin or key missing");
- return;
- }
+ if (!conn)
+ goto unlock;
- if (!ev->status) {
+ if (!ev->status) {
+ if (!hci_conn_ssp_enabled(conn) &&
+ test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+ BT_INFO("re-auth of legacy device is not possible.");
+ } else {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
- } else {
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
- conn->sec_level = BT_SECURITY_LOW;
}
+ } else {
+ mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ ev->status);
+ }
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+ clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
- if (conn->state == BT_CONFIG) {
- if (!ev->status && hdev->ssp_mode > 0 &&
- conn->ssp_mode > 0) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- } else {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
- if (ev->status)
- conn->disc_timeout = HCI_DISCONN_AUTH_FAILED_TIMEOUT;
- hci_conn_put(conn);
- }
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hci_conn_ssp_enabled(conn)) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+ &cp);
} else {
- hci_auth_cfm(conn, ev->status);
-
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_put(conn);
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_drop(conn);
}
+ } else {
+ hci_auth_cfm(conn, ev->status);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- if (!ev->status) {
- if (conn->link_mode & HCI_LM_ENCRYPT) {
- /* Encryption implies authentication */
- conn->link_mode |= HCI_LM_AUTH;
- conn->link_mode |= HCI_LM_ENCRYPT;
- conn->sec_level =
- conn->pending_sec_level;
- clear_bit(HCI_CONN_ENCRYPT_PEND,
- &conn->pend);
- hci_encrypt_cfm(conn, ev->status, 1);
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(conn);
+ }
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_encrypt_change(hdev->id,
- &conn->dst,
- ev->status);
-
- } else {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev,
- HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- }
- } else {
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
- hci_encrypt_cfm(conn, ev->status, 0x00);
- }
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
+ if (!ev->status) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+ &cp);
+ } else {
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ hci_encrypt_cfm(conn, ev->status, 0x00);
}
}
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_name *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1949,25 +1974,40 @@
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_remote_name(hdev->id, &ev->bdaddr, ev->status, ev->name);
-
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto check_auth;
+
+ if (ev->status == 0)
+ hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
+ strnlen(ev->name, HCI_MAX_NAME_LENGTH));
+ else
+ hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
+
+check_auth:
+ if (!conn)
+ goto unlock;
+
+ if (!hci_outgoing_auth_needed(hdev, conn))
+ goto unlock;
+
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_encrypt_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1983,47 +2023,35 @@
conn->link_mode &= ~HCI_LM_ENCRYPT;
}
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+ if (ev->status && conn->state == BT_CONNECTED) {
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
if (conn->state == BT_CONFIG) {
if (!ev->status)
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
- } else {
- /*
- * If the remote device does not support
- * Pause Encryption, usually during the
- * roleSwitch we see Encryption disable
- * for short duration. Allow remote device
- * to disable encryption
- * for short duration in this case.
- */
- if ((ev->encrypt == 0) && (ev->status == 0) &&
- ((conn->features[5] & LMP_PAUSE_ENC) == 0)) {
- mod_timer(&conn->encrypt_pause_timer,
- jiffies + msecs_to_jiffies(500));
- BT_INFO("enc pause timer, enc_pend_flag set");
- } else {
- del_timer(&conn->encrypt_pause_timer);
- hci_encrypt_cfm(conn, ev->status, ev->encrypt);
- }
- }
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_encrypt_change(hdev->id, &conn->dst, ev->status);
+ hci_conn_drop(conn);
+ } else
+ hci_encrypt_cfm(conn, ev->status, ev->encrypt);
}
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2032,7 +2060,7 @@
if (!ev->status)
conn->link_mode |= HCI_LM_SECURE;
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
hci_key_change_cfm(conn, ev->status);
}
@@ -2040,12 +2068,13 @@
hci_dev_unlock(hdev);
}
-static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_features *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2053,10 +2082,8 @@
if (!conn)
goto unlock;
- if (!ev->status) {
- memcpy(conn->features, ev->features, 8);
- mgmt_remote_features(hdev->id, &conn->dst, ev->features);
- }
+ if (!ev->status)
+ memcpy(conn->features[0], ev->features, 8);
if (conn->state != BT_CONFIG)
goto unlock;
@@ -2066,61 +2093,35 @@
cp.handle = ev->handle;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
goto unlock;
- } else if (!(lmp_ssp_capable(conn)) && conn->auth_initiator &&
- (conn->pending_sec_level == BT_SECURITY_VERY_HIGH)) {
- conn->pending_sec_level = BT_SECURITY_MEDIUM;
}
- if (!ev->status) {
+ if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- }
+ } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_ev_remote_version *ev = (void *) skb->data;
- struct hci_cp_read_remote_features cp;
- struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
-
- hci_dev_lock(hdev);
- cp.handle = ev->handle;
- hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
- sizeof(cp), &cp);
-
- conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (!conn)
- goto unlock;
- if (!ev->status)
- mgmt_remote_version(hdev->id, &conn->dst, ev->lmp_ver,
- ev->manufacturer, ev->lmp_subver);
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- BT_DBG("%s", hdev->name);
-}
-
-static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
+ u8 status = skb->data[sizeof(*ev)];
__u16 opcode;
skb_pull(skb, sizeof(*ev));
@@ -2135,12 +2136,12 @@
hci_cc_inquiry_cancel(hdev, skb);
break;
- case HCI_OP_EXIT_PERIODIC_INQ:
- hci_cc_exit_periodic_inq(hdev, skb);
+ case HCI_OP_PERIODIC_INQ:
+ hci_cc_periodic_inq(hdev, skb);
break;
- case HCI_OP_LINK_KEY_REPLY:
- hci_cc_link_key_reply(hdev, skb);
+ case HCI_OP_EXIT_PERIODIC_INQ:
+ hci_cc_exit_periodic_inq(hdev, skb);
break;
case HCI_OP_REMOTE_NAME_REQ_CANCEL:
@@ -2207,14 +2208,6 @@
hci_cc_write_voice_setting(hdev, skb);
break;
- case HCI_OP_HOST_BUFFER_SIZE:
- hci_cc_host_buffer_size(hdev, skb);
- break;
-
- case HCI_OP_READ_SSP_MODE:
- hci_cc_read_ssp_mode(hdev, skb);
- break;
-
case HCI_OP_WRITE_SSP_MODE:
hci_cc_write_ssp_mode(hdev, skb);
break;
@@ -2231,6 +2224,10 @@
hci_cc_read_local_features(hdev, skb);
break;
+ case HCI_OP_READ_LOCAL_EXT_FEATURES:
+ hci_cc_read_local_ext_features(hdev, skb);
+ break;
+
case HCI_OP_READ_BUFFER_SIZE:
hci_cc_read_buffer_size(hdev, skb);
break;
@@ -2239,47 +2236,42 @@
hci_cc_read_bd_addr(hdev, skb);
break;
- case HCI_OP_WRITE_CA_TIMEOUT:
- hci_cc_write_ca_timeout(hdev, skb);
+ case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
+ hci_cc_read_page_scan_activity(hdev, skb);
break;
- case HCI_OP_READ_FLOW_CONTROL_MODE:
- hci_cc_read_flow_control_mode(hdev, skb);
+ case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
+ hci_cc_write_page_scan_activity(hdev, skb);
+ break;
+
+ case HCI_OP_READ_PAGE_SCAN_TYPE:
+ hci_cc_read_page_scan_type(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_PAGE_SCAN_TYPE:
+ hci_cc_write_page_scan_type(hdev, skb);
break;
case HCI_OP_READ_DATA_BLOCK_SIZE:
hci_cc_read_data_block_size(hdev, skb);
break;
+ case HCI_OP_READ_FLOW_CONTROL_MODE:
+ hci_cc_read_flow_control_mode(hdev, skb);
+ break;
+
case HCI_OP_READ_LOCAL_AMP_INFO:
hci_cc_read_local_amp_info(hdev, skb);
break;
case HCI_OP_READ_LOCAL_AMP_ASSOC:
- case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
- hci_amp_cmd_complete(hdev, opcode, skb);
- break;
-
- case HCI_OP_DELETE_STORED_LINK_KEY:
- hci_cc_delete_stored_link_key(hdev, skb);
- break;
-
- case HCI_OP_SET_EVENT_MASK:
- hci_cc_set_event_mask(hdev, skb);
- break;
-
- case HCI_OP_WRITE_INQUIRY_MODE:
- hci_cc_write_inquiry_mode(hdev, skb);
+ hci_cc_read_local_amp_assoc(hdev, skb);
break;
case HCI_OP_READ_INQ_RSP_TX_POWER:
hci_cc_read_inq_rsp_tx_power(hdev, skb);
break;
- case HCI_OP_SET_EVENT_FLT:
- hci_cc_set_event_flt(hdev, skb);
- break;
-
case HCI_OP_PIN_CODE_REPLY:
hci_cc_pin_code_reply(hdev, skb);
break;
@@ -2296,16 +2288,12 @@
hci_cc_le_read_buffer_size(hdev, skb);
break;
- case HCI_OP_LE_READ_WHITE_LIST_SIZE:
- hci_cc_le_read_white_list_size(hdev, skb);
+ case HCI_OP_LE_READ_LOCAL_FEATURES:
+ hci_cc_le_read_local_features(hdev, skb);
break;
- case HCI_OP_LE_CLEAR_WHITE_LIST:
- hci_cc_le_clear_white_list(hdev, skb);
- break;
-
- case HCI_OP_READ_RSSI:
- hci_cc_read_rssi(hdev, skb);
+ case HCI_OP_LE_READ_ADV_TX_POWER:
+ hci_cc_le_read_adv_tx_power(hdev, skb);
break;
case HCI_OP_USER_CONFIRM_REPLY:
@@ -2316,34 +2304,60 @@
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
- case HCI_OP_LE_LTK_REPLY:
- hci_cc_le_ltk_reply(hdev, skb);
+ case HCI_OP_USER_PASSKEY_REPLY:
+ hci_cc_user_passkey_reply(hdev, skb);
break;
- case HCI_OP_LE_LTK_NEG_REPLY:
- hci_cc_le_ltk_neg_reply(hdev, skb);
+ case HCI_OP_USER_PASSKEY_NEG_REPLY:
+ hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_SCAN_PARAM:
+ hci_cc_le_set_scan_param(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_ADV_ENABLE:
+ hci_cc_le_set_adv_enable(hdev, skb);
break;
case HCI_OP_LE_SET_SCAN_ENABLE:
hci_cc_le_set_scan_enable(hdev, skb);
break;
+ case HCI_OP_LE_READ_WHITE_LIST_SIZE:
+ hci_cc_le_read_white_list_size(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_SUPPORTED_STATES:
+ hci_cc_le_read_supported_states(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_LE_HOST_SUPPORTED:
+ hci_cc_write_le_host_supported(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
+ hci_cc_write_remote_amp_assoc(hdev, skb);
+ break;
+
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
- if (ev->opcode != HCI_OP_NOP)
+ if (opcode != HCI_OP_NOP)
del_timer(&hdev->cmd_timer);
- if (ev->ncmd) {
+ hci_req_cmd_complete(hdev, opcode, status);
+
+ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
-static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
__u16 opcode;
@@ -2397,64 +2411,47 @@
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
- case HCI_OP_CREATE_LOGICAL_LINK:
- hci_cs_create_logical_link(hdev, ev->status);
- break;
-
- case HCI_OP_ACCEPT_LOGICAL_LINK:
- hci_cs_accept_logical_link(hdev, ev->status);
- break;
-
- case HCI_OP_DISCONN_LOGICAL_LINK:
- hci_cs_disconn_logical_link(hdev, ev->status);
- break;
-
- case HCI_OP_FLOW_SPEC_MODIFY:
- hci_cs_flow_spec_modify(hdev, ev->status);
- break;
-
- case HCI_OP_CREATE_PHYS_LINK:
- case HCI_OP_ACCEPT_PHYS_LINK:
- hci_amp_cmd_status(hdev, opcode, ev->status);
- break;
-
- case HCI_OP_DISCONN_PHYS_LINK:
- hci_cs_disconn_physical_link(hdev, ev->status);
-
case HCI_OP_DISCONNECT:
- if (ev->status != 0)
- mgmt_disconnect_failed(hdev->id);
+ hci_cs_disconnect(hdev, ev->status);
break;
case HCI_OP_LE_CREATE_CONN:
hci_cs_le_create_conn(hdev, ev->status);
break;
- case HCI_OP_LE_START_ENC:
- hci_cs_le_start_enc(hdev, ev->status);
+ case HCI_OP_CREATE_PHY_LINK:
+ hci_cs_create_phylink(hdev, ev->status);
+ break;
+
+ case HCI_OP_ACCEPT_PHY_LINK:
+ hci_cs_accept_phylink(hdev, ev->status);
break;
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
- if (ev->opcode != HCI_OP_NOP)
+ if (opcode != HCI_OP_NOP)
del_timer(&hdev->cmd_timer);
+ if (ev->status ||
+ (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
+ hci_req_cmd_complete(hdev, opcode, ev->status);
+
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
-static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_role_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2467,7 +2464,7 @@
conn->link_mode |= HCI_LM_MASTER;
}
- clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
hci_role_switch_cfm(conn, ev->status, ev->role);
}
@@ -2475,136 +2472,149 @@
hci_dev_unlock(hdev);
}
-static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
- __le16 *ptr;
int i;
- skb_pull(skb, sizeof(*ev));
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+ BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+ return;
+ }
+
+ if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
+ ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
+ BT_DBG("%s bad parameters", hdev->name);
+ return;
+ }
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
- if (skb->len < ev->num_hndl * 4) {
- BT_DBG("%s bad parameters", hdev->name);
- return;
- }
-
- tasklet_disable(&hdev->tx_task);
-
- for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
- struct hci_conn *conn = NULL;
- struct hci_chan *chan;
+ for (i = 0; i < ev->num_hndl; i++) {
+ struct hci_comp_pkts_info *info = &ev->handles[i];
+ struct hci_conn *conn;
__u16 handle, count;
- handle = get_unaligned_le16(ptr++);
- count = get_unaligned_le16(ptr++);
+ handle = __le16_to_cpu(info->handle);
+ count = __le16_to_cpu(info->count);
- if (hdev->dev_type == HCI_BREDR)
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- else {
- chan = hci_chan_list_lookup_handle(hdev, handle);
- if (chan)
- conn = chan->conn;
- }
- if (conn) {
- conn->sent -= count;
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn)
+ continue;
- if (conn->type == ACL_LINK) {
+ conn->sent -= count;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ break;
+
+ case LE_LINK:
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
- } else if (conn->type == LE_LINK) {
- if (hdev->le_pkts) {
- hdev->le_cnt += count;
- if (hdev->le_cnt > hdev->le_pkts)
- hdev->le_cnt = hdev->le_pkts;
- } else {
- hdev->acl_cnt += count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- }
- } else {
- hdev->sco_cnt += count;
- if (hdev->sco_cnt > hdev->sco_pkts)
- hdev->sco_cnt = hdev->sco_pkts;
}
+ break;
+
+ case SCO_LINK:
+ hdev->sco_cnt += count;
+ if (hdev->sco_cnt > hdev->sco_pkts)
+ hdev->sco_cnt = hdev->sco_pkts;
+ break;
+
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
}
}
- tasklet_schedule(&hdev->tx_task);
-
- tasklet_enable(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
-static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
+ __u16 handle)
+{
+ struct hci_chan *chan;
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ return hci_conn_hash_lookup_handle(hdev, handle);
+ case HCI_AMP:
+ chan = hci_chan_lookup_handle(hdev, handle);
+ if (chan)
+ return chan->conn;
+ break;
+ default:
+ BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+ break;
+ }
+
+ return NULL;
+}
+
+static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
- __le16 *ptr;
int i;
- skb_pull(skb, sizeof(*ev));
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
+ BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+ return;
+ }
- BT_DBG("%s total_num_blocks %d num_hndl %d",
- hdev->name, ev->total_num_blocks, ev->num_hndl);
-
- if (skb->len < ev->num_hndl * 6) {
+ if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
+ ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
- tasklet_disable(&hdev->tx_task);
+ BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
+ ev->num_hndl);
- for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+ for (i = 0; i < ev->num_hndl; i++) {
+ struct hci_comp_blocks_info *info = &ev->handles[i];
struct hci_conn *conn = NULL;
- struct hci_chan *chan;
__u16 handle, block_count;
- handle = get_unaligned_le16(ptr++);
+ handle = __le16_to_cpu(info->handle);
+ block_count = __le16_to_cpu(info->blocks);
- /* Skip packet count */
- ptr++;
- block_count = get_unaligned_le16(ptr++);
+ conn = __hci_conn_lookup_handle(hdev, handle);
+ if (!conn)
+ continue;
- BT_DBG("%s handle %d count %d", hdev->name, handle,
- block_count);
+ conn->sent -= block_count;
- if (hdev->dev_type == HCI_BREDR)
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- else {
- chan = hci_chan_list_lookup_handle(hdev, handle);
- if (chan)
- conn = chan->conn;
- }
- if (conn) {
- BT_DBG("%s conn %p sent %d", hdev->name,
- conn, conn->sent);
+ switch (conn->type) {
+ case ACL_LINK:
+ case AMP_LINK:
+ hdev->block_cnt += block_count;
+ if (hdev->block_cnt > hdev->num_blocks)
+ hdev->block_cnt = hdev->num_blocks;
+ break;
- conn->sent -= block_count;
-
- if (conn->type == ACL_LINK) {
- hdev->acl_cnt += block_count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- } else {
- /* We should not find ourselves here */
- BT_DBG("Unexpected event for SCO connection");
- }
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
}
}
- tasklet_schedule(&hdev->tx_task);
-
- tasklet_enable(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
-static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2613,24 +2623,22 @@
conn->mode = ev->mode;
conn->interval = __le16_to_cpu(ev->interval);
- if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
+ &conn->flags)) {
if (conn->mode == HCI_CM_ACTIVE)
- conn->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
else
- conn->power_save = 0;
+ clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
}
- if (conn->mode == HCI_CM_SNIFF)
- if (wake_lock_active(&conn->idle_lock))
- wake_unlock(&conn->idle_lock);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, ev->status);
}
hci_dev_unlock(hdev);
}
-static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pin_code_req *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2640,24 +2648,34 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn && conn->state == BT_CONNECTED) {
+ if (!conn)
+ goto unlock;
+
+ if (conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
- hci_conn_put(conn);
- hci_conn_enter_active_mode(conn, 0);
+ hci_conn_drop(conn);
}
- if (!test_bit(HCI_PAIRABLE, &hdev->flags))
+ if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
+ else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
+ u8 secure;
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_request(hdev->id, &ev->bdaddr);
+ if (conn->pending_sec_level == BT_SECURITY_HIGH)
+ secure = 1;
+ else
+ secure = 0;
+ mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_req *ev = (void *) skb->data;
struct hci_cp_link_key_reply cp;
@@ -2666,47 +2684,48 @@
BT_DBG("%s", hdev->name);
- if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
+ if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
return;
hci_dev_lock(hdev);
key = hci_find_link_key(hdev, &ev->bdaddr);
if (!key) {
- BT_DBG("%s link key not found for %s", hdev->name,
- batostr(&ev->bdaddr));
+ BT_DBG("%s link key not found for %pMR", hdev->name,
+ &ev->bdaddr);
goto not_found;
}
- BT_DBG("%s found key type %u for %s", hdev->name, key->key_type,
- batostr(&ev->bdaddr));
+ BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
+ &ev->bdaddr);
- if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->key_type == 0x03) {
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
+ key->type == HCI_LK_DEBUG_COMBINATION) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-
if (conn) {
- BT_DBG("Conn pending sec level is %d, ssp is %d, key len is %d",
- conn->pending_sec_level, conn->ssp_mode, key->pin_len);
- }
- if (conn && (conn->ssp_mode == 0) &&
- (conn->pending_sec_level == BT_SECURITY_VERY_HIGH) &&
- (key->pin_len != 16)) {
- BT_DBG("Security is high ignoring this key");
- goto not_found;
- }
+ if (key->type == HCI_LK_UNAUTH_COMBINATION &&
+ conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
+ BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ goto not_found;
+ }
- if (key->key_type == 0x04 && conn && conn->auth_type != 0xff &&
- (conn->auth_type & 0x01)) {
- BT_DBG("%s ignoring unauthenticated key", hdev->name);
- goto not_found;
+ if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
+ conn->pending_sec_level == BT_SECURITY_HIGH) {
+ BT_DBG("%s ignoring key unauthenticated for high security",
+ hdev->name);
+ goto not_found;
+ }
+
+ conn->key_type = key->type;
+ conn->pin_length = key->pin_len;
}
bacpy(&cp.bdaddr, &ev->bdaddr);
- memcpy(cp.link_key, key->val, 16);
+ memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
@@ -2719,13 +2738,13 @@
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
u8 pin_len = 0;
- BT_DBG("%s type %d", hdev->name, ev->key_type);
+ BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
@@ -2733,30 +2752,27 @@
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-
- memcpy(conn->link_key, ev->link_key, 16);
- conn->key_type = ev->key_type;
- hci_disconnect_amp(conn, 0x06);
-
- conn->link_mode &= ~HCI_LM_ENCRYPT;
pin_len = conn->pin_length;
- hci_conn_put(conn);
- hci_conn_enter_active_mode(conn, 0);
+
+ if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
+ conn->key_type = ev->key_type;
+
+ hci_conn_drop(conn);
}
- if (test_bit(HCI_LINK_KEYS, &hdev->flags))
- hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
- ev->key_type, pin_len);
+ if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
+ hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
+ ev->key_type, pin_len);
hci_dev_unlock(hdev);
}
-static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_clock_offset *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2774,12 +2790,12 @@
hci_dev_unlock(hdev);
}
-static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pkt_type_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2790,7 +2806,7 @@
hci_dev_unlock(hdev);
}
-static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
struct inquiry_entry *ie;
@@ -2808,16 +2824,21 @@
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
+ bool name_known, ssp;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2833,10 +2854,12 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
- info->dev_class, info->rssi,
- 0, NULL);
+
+ name_known = hci_inquiry_cache_update(hdev, &data,
+ false, &ssp);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi,
+ !name_known, ssp, NULL, 0);
}
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -2850,17 +2873,19 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
- info->dev_class, info->rssi,
- 0, NULL);
+ name_known = hci_inquiry_cache_update(hdev, &data,
+ false, &ssp);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi,
+ !name_known, ssp, NULL, 0);
}
}
hci_dev_unlock(hdev);
}
-static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_ext_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_ext_features *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2873,54 +2898,62 @@
if (!conn)
goto unlock;
+ if (ev->page < HCI_MAX_PAGES)
+ memcpy(conn->features[ev->page], ev->features, 8);
+
if (!ev->status && ev->page == 0x01) {
struct inquiry_entry *ie;
ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
- conn->ssp_mode = (ev->features[0] & 0x01);
- /*In case if remote device ssp supported/2.0 device
- reduce the security level to MEDIUM if it is VERY HIGH*/
- if (!conn->ssp_mode && conn->auth_initiator &&
- (conn->pending_sec_level == BT_SECURITY_VERY_HIGH))
- conn->pending_sec_level = BT_SECURITY_MEDIUM;
-
- if (conn->ssp_mode && conn->auth_initiator &&
- conn->io_capability != 0x03) {
- conn->pending_sec_level = BT_SECURITY_VERY_HIGH;
- conn->auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+ if (ev->features[0] & LMP_HOST_SSP) {
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+ } else {
+ /* It is mandatory by the Bluetooth specification that
+ * Extended Inquiry Results are only used when Secure
+ * Simple Pairing is enabled, but some devices violate
+ * this.
+ *
+ * To make these devices work, the internal SSP
+ * enabled flag needs to be cleared if the remote host
+ * features do not indicate SSP support */
+ clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
}
if (conn->state != BT_CONFIG)
goto unlock;
- if (!ev->status) {
+ if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- }
+ } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2941,22 +2974,19 @@
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
- hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
break;
+ case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
- if (!conn->hdev->is_wbs) {
- conn->pkt_type =
- (hdev->esco_type & SCO_ESCO_MASK) |
+ conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
- hci_setup_sync(conn, conn->link->handle);
- goto unlock;
- }
+ hci_setup_sync(conn, conn->link->handle);
+ goto unlock;
}
/* fall through */
@@ -2973,40 +3003,27 @@
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- BT_DBG("%s", hdev->name);
-}
-
-static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_ev_sniff_subrate *ev = (void *) skb->data;
- struct hci_conn *conn =
- hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-
- BT_DBG("%s status %d", hdev->name, ev->status);
- if (conn && (ev->max_rx_latency > hdev->sniff_max_interval)) {
- BT_ERR("value of rx_latency:%d", ev->max_rx_latency);
- hci_dev_lock(hdev);
- hci_conn_enter_active_mode(conn, 1);
- hci_dev_unlock(hdev);
- }
-}
-
-static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
+ size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
+ bool name_known, ssp;
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -3015,39 +3032,89 @@
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x01;
- hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, 0, 0,
- info->dev_class, info->rssi,
- HCI_MAX_EIR_LENGTH, info->data);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ name_known = eir_has_data_type(info->data,
+ sizeof(info->data),
+ EIR_NAME_COMPLETE);
+ else
+ name_known = true;
+
+ name_known = hci_inquiry_cache_update(hdev, &data, name_known,
+ &ssp);
+ eir_len = eir_get_length(info->data, sizeof(info->data));
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi, !name_known,
+ ssp, info->data, eir_len);
}
hci_dev_unlock(hdev);
}
-static inline u8 hci_get_auth_req(struct hci_conn *conn)
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- BT_DBG("%p", conn);
+ struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+ BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
+ __le16_to_cpu(ev->handle));
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+ if (!conn)
+ goto unlock;
+
+ if (!ev->status)
+ conn->sec_level = conn->pending_sec_level;
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+ if (ev->status && conn->state == BT_CONNECTED) {
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status)
+ conn->state = BT_CONNECTED;
+
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_drop(conn);
+ } else {
+ hci_auth_cfm(conn, ev->status);
+
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(conn);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static u8 hci_get_auth_req(struct hci_conn *conn)
+{
/* If remote requests dedicated bonding follow that lead */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
/* If both remote and local IO capabilities allow MITM
* protection then require it, otherwise don't */
- if (conn->remote_cap == 0x03 || conn->io_capability == 0x03) {
+ if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
return 0x02;
- } else {
- conn->auth_type |= 0x01;
+ else
return 0x03;
- }
}
/* If remote requests no-bonding follow that lead */
- if (conn->remote_auth <= 0x01)
- return 0x00;
+ if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+ return conn->remote_auth | (conn->auth_type & 0x01);
return conn->auth_type;
}
-static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3062,45 +3129,44 @@
hci_conn_hold(conn);
- if (!test_bit(HCI_MGMT, &hdev->flags))
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
- if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
- (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+ if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
+ (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
- u8 io_cap = conn->io_capability;
- /* ACL-SSP does not support IO CAP 0x04 */
- cp.capability = (io_cap == 0x04) ? 0x01 : io_cap;
bacpy(&cp.bdaddr, &ev->bdaddr);
- if (conn->auth_initiator)
- cp.authentication = conn->auth_type;
- else
- cp.authentication = hci_get_auth_req(conn);
+ /* Change the IO capability from KeyboardDisplay
+ * to DisplayYesNo as it is not supported by BT spec. */
+ cp.capability = (conn->io_capability == 0x04) ?
+ 0x01 : conn->io_capability;
+ conn->auth_type = hci_get_auth_req(conn);
+ cp.authentication = conn->auth_type;
- if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
- hci_find_remote_oob_data(hdev, &conn->dst))
+ if (hci_find_remote_oob_data(hdev, &conn->dst) &&
+ (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
cp.oob_data = 0x01;
else
cp.oob_data = 0x00;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
} else {
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x16; /* Pairing not allowed */
+ cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_reply *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3114,35 +3180,154 @@
goto unlock;
conn->remote_cap = ev->capability;
- conn->remote_oob = ev->oob_data;
conn->remote_auth = ev->authentication;
+ if (ev->oob_data)
+ set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_user_ssp_confirmation_evt(struct hci_dev *hdev,
- u8 event, struct sk_buff *skb)
+static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
+ int loc_mitm, rem_mitm, confirm_hint = 0;
+ struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->flags)) {
- if (event == HCI_EV_USER_PASSKEY_REQUEST)
- mgmt_user_confirm_request(hdev->id, event,
- &ev->bdaddr, 0);
- else
- mgmt_user_confirm_request(hdev->id, event,
- &ev->bdaddr, ev->passkey);
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ goto unlock;
+
+ loc_mitm = (conn->auth_type & 0x01);
+ rem_mitm = (conn->remote_auth & 0x01);
+
+ /* If we require MITM but the remote device can't provide that
+ * (it has NoInputNoOutput) then reject the confirmation
+ * request. The only exception is when we're dedicated bonding
+ * initiators (connect_cfm_cb set) since then we always have the MITM
+ * bit set. */
+ if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
+ BT_DBG("Rejecting request: remote device can't provide MITM");
+ hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+ goto unlock;
}
+ /* If no side requires MITM protection; auto-accept */
+ if ((!loc_mitm || conn->remote_cap == 0x03) &&
+ (!rem_mitm || conn->io_capability == 0x03)) {
+
+ /* If we're not the initiators request authorization to
+ * proceed from user space (mgmt_user_confirm with
+ * confirm_hint set to 1). */
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ BT_DBG("Confirming auto-accept as acceptor");
+ confirm_hint = 1;
+ goto confirm;
+ }
+
+ BT_DBG("Auto-accept of user confirmation with %ums delay",
+ hdev->auto_accept_delay);
+
+ if (hdev->auto_accept_delay > 0) {
+ int delay = msecs_to_jiffies(hdev->auto_accept_delay);
+ mod_timer(&conn->auto_accept_timer, jiffies + delay);
+ goto unlock;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+ goto unlock;
+ }
+
+confirm:
+ mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
+ confirm_hint);
+
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_user_passkey_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
+}
+
+static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ return;
+
+ conn->passkey_notify = __le32_to_cpu(ev->passkey);
+ conn->passkey_entered = 0;
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+ conn->dst_type, conn->passkey_notify,
+ conn->passkey_entered);
+}
+
+static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_keypress_notify *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ return;
+
+ switch (ev->type) {
+ case HCI_KEYPRESS_STARTED:
+ conn->passkey_entered = 0;
+ return;
+
+ case HCI_KEYPRESS_ENTERED:
+ conn->passkey_entered++;
+ break;
+
+ case HCI_KEYPRESS_ERASED:
+ conn->passkey_entered--;
+ break;
+
+ case HCI_KEYPRESS_CLEARED:
+ conn->passkey_entered = 0;
+ break;
+
+ case HCI_KEYPRESS_COMPLETED:
+ return;
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+ conn->dst_type, conn->passkey_notify,
+ conn->passkey_entered);
+}
+
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3160,33 +3345,40 @@
* initiated the authentication. A traditional auth_complete
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
+ mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ ev->status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_host_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_host_features *ev = (void *) skb->data;
struct inquiry_entry *ie;
+ struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (conn)
+ memcpy(conn->features[1], ev->features, 8);
+
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
hci_dev_unlock(hdev);
}
-static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
struct oob_data *data;
@@ -3195,7 +3387,7 @@
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->flags))
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
@@ -3207,119 +3399,218 @@
memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
- &cp);
+ &cp);
} else {
struct hci_cp_remote_oob_data_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
- &cp);
+ &cp);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_phy_link_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_ev_le_conn_complete *ev = (void *) skb->data;
- struct hci_conn *conn;
- u8 white_list;
+ struct hci_ev_phy_link_complete *ev = (void *) skb->data;
+ struct hci_conn *hcon, *bredr_hcon;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
+ ev->status);
hci_dev_lock(hdev);
- /* Ignore event for LE cancel create conn whitelist */
- if (ev->status && !bacmp(&ev->bdaddr, BDADDR_ANY))
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ if (ev->status) {
+ hci_conn_del(hcon);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
+
+ hcon->state = BT_CONNECTED;
+ bacpy(&hcon->dst, &bredr_hcon->dst);
+
+ hci_conn_hold(hcon);
+ hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(hcon);
+
+ hci_conn_add_sysfs(hcon);
+
+ amp_physical_cfm(bredr_hcon, hcon);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_logical_link_complete *ev = (void *) skb->data;
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
+ struct amp_mgr *mgr;
+
+ BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
+ hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
+ ev->status);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon)
+ return;
+
+ /* Create AMP hchan */
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
+ return;
+
+ hchan->handle = le16_to_cpu(ev->handle);
+
+ BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+
+ mgr = hcon->amp_mgr;
+ if (mgr && mgr->bredr_chan) {
+ struct l2cap_chan *bredr_chan = mgr->bredr_chan;
+
+ l2cap_chan_lock(bredr_chan);
+
+ bredr_chan->conn->mtu = hdev->block_mtu;
+ l2cap_logical_cfm(bredr_chan, hchan, 0);
+ hci_conn_hold(hcon);
+
+ l2cap_chan_unlock(bredr_chan);
+ }
+}
+
+static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
+ struct hci_chan *hchan;
+
+ BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
+ le16_to_cpu(ev->handle), ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+ if (!hchan)
goto unlock;
- if (hci_conn_hash_lookup_ba(hdev, LE_LINK, BDADDR_ANY))
- white_list = 1;
- else
- white_list = 0;
+ amp_destroy_logical_link(hchan, ev->reason);
- BT_DBG("w_list %d", white_list);
+unlock:
+ hci_dev_unlock(hdev);
+}
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
+static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
+ struct hci_conn *hcon;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (hcon) {
+ hcon->state = BT_CLOSED;
+ hci_conn_del(hcon);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (!conn) {
- conn = hci_le_conn_add(hdev, &ev->bdaddr, ev->bdaddr_type);
+ conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
- hci_dev_unlock(hdev);
- return;
+ goto unlock;
+ }
+
+ conn->dst_type = ev->bdaddr_type;
+
+ if (ev->role == LE_CONN_ROLE_MASTER) {
+ conn->out = true;
+ conn->link_mode |= HCI_LM_MASTER;
}
}
if (ev->status) {
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
+ if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, 0, NULL, 0, NULL);
+
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr, 1);
- mgmt_le_conn_params(hdev->id, &ev->bdaddr,
- __le16_to_cpu(ev->interval),
- __le16_to_cpu(ev->latency),
- __le16_to_cpu(ev->supervision_timeout));
- hci_conn_hold(conn);
- hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
- if (!white_list)
- hci_proto_connect_cfm(conn, ev->status);
+ hci_proto_connect_cfm(conn, ev->status);
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
- struct hci_conn *conn;
+ u8 num_reports = skb->data[0];
+ void *ptr = &skb->data[1];
+ s8 rssi;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ while (num_reports--) {
+ struct hci_ev_le_advertising_info *ev = ptr;
- hci_dev_lock(hdev);
+ rssi = ev->data[ev->length];
+ mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
+ NULL, rssi, 0, 1, ev->data, ev->length);
- conn = hci_conn_hash_lookup_handle(hdev,
- __le16_to_cpu(ev->handle));
- if (conn == NULL) {
- BT_ERR("Unknown connection update");
- goto unlock;
+ ptr += sizeof(*ev) + ev->length + 1;
}
-
- if (ev->status) {
- BT_ERR("Connection update unsuccessful");
- goto unlock;
- }
-
- mgmt_le_conn_params(hdev->id, &conn->dst,
- __le16_to_cpu(ev->interval),
- __le16_to_cpu(ev->latency),
- __le16_to_cpu(ev->supervision_timeout));
-
-unlock:
- hci_dev_unlock(hdev);
}
-static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_ltk_req *ev = (void *) skb->data;
struct hci_cp_le_ltk_reply cp;
struct hci_cp_le_ltk_neg_reply neg;
struct hci_conn *conn;
- struct link_key *ltk;
+ struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+ BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -3333,10 +3624,27 @@
memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
cp.handle = cpu_to_le16(conn->handle);
- conn->pin_length = ltk->pin_len;
+
+ if (ltk->authenticated)
+ conn->pending_sec_level = BT_SECURITY_HIGH;
+ else
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
+ conn->enc_key_size = ltk->enc_size;
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+ /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
+ * temporary key used to encrypt a connection following
+ * pairing. It is used during the Encrypted Session Setup to
+ * distribute the keys. Later, security can be re-established
+ * using a distributed LTK.
+ */
+ if (ltk->type == HCI_SMP_STK_SLAVE) {
+ list_del(&ltk->list);
+ kfree(ltk);
+ }
+
hci_dev_unlock(hdev);
return;
@@ -3347,28 +3655,7 @@
hci_dev_unlock(hdev);
}
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_le_advertising_info *ev;
- u8 num_reports;
-
- num_reports = skb->data[0];
- ev = (void *) &skb->data[1];
-
- hci_dev_lock(hdev);
-
- while (num_reports--) {
- mgmt_device_found(hdev->id, &ev->bdaddr, ev->bdaddr_type,
- 1, NULL, 0, ev->length, ev->data);
- hci_add_adv_entry(hdev, ev);
- ev = (void *) (ev->data + ev->length + 1);
- }
-
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -3379,141 +3666,33 @@
hci_le_conn_complete_evt(hdev, skb);
break;
- case HCI_EV_LE_CONN_UPDATE_COMPLETE:
- hci_le_conn_update_complete_evt(hdev, skb);
+ case HCI_EV_LE_ADVERTISING_REPORT:
+ hci_le_adv_report_evt(hdev, skb);
break;
case HCI_EV_LE_LTK_REQ:
hci_le_ltk_request_evt(hdev, skb);
break;
- case HCI_EV_LE_ADVERTISING_REPORT:
- hci_le_adv_report_evt(hdev, skb);
- break;
-
default:
break;
}
}
-static inline void hci_phy_link_complete(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_ev_phys_link_complete *ev = (void *) skb->data;
- struct hci_conn *conn;
+ struct hci_ev_channel_selected *ev = (void *) skb->data;
+ struct hci_conn *hcon;
- BT_DBG("%s handle %d status %d", hdev->name, ev->phy_handle,
- ev->status);
+ BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
- hci_dev_lock(hdev);
+ skb_pull(skb, sizeof(*ev));
- if (ev->status == 0) {
- conn = hci_conn_add(hdev, ACL_LINK, 0, BDADDR_ANY);
- if (conn) {
- conn->handle = ev->phy_handle;
- conn->state = BT_CONNECTED;
-
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT/2;
- hci_conn_put(conn);
-
- hci_conn_hold_device(conn);
- hci_conn_add_sysfs(conn);
- } else
- BT_ERR("No memory for new connection");
- }
-
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_log_link_complete(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_log_link_complete *ev = (void *) skb->data;
- struct hci_chan *chan;
-
- BT_DBG("%s handle %d status %d", hdev->name,
- __le16_to_cpu(ev->log_handle), ev->status);
-
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_id(hdev, ev->phy_handle);
-
- if (chan) {
- if (ev->status == 0) {
- chan->ll_handle = __le16_to_cpu(ev->log_handle);
- chan->state = BT_CONNECTED;
- } else {
- chan->state = BT_CLOSED;
- }
-
- hci_proto_create_cfm(chan, ev->status);
- }
-
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_flow_spec_modify_complete(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_flow_spec_modify_complete *ev = (void *) skb->data;
- struct hci_chan *chan;
-
- BT_DBG("%s handle %d status %d", hdev->name,
- __le16_to_cpu(ev->log_handle), ev->status);
-
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_handle(hdev, ev->log_handle);
- if (chan)
- hci_proto_modify_cfm(chan, ev->status);
-
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_disconn_log_link_complete_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_log_link_complete *ev = (void *) skb->data;
- struct hci_chan *chan;
-
- BT_DBG("%s handle %d status %d", hdev->name,
- __le16_to_cpu(ev->log_handle), ev->status);
-
- if (ev->status)
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon)
return;
- hci_dev_lock(hdev);
-
- chan = hci_chan_list_lookup_handle(hdev, __le16_to_cpu(ev->log_handle));
- if (chan)
- hci_proto_destroy_cfm(chan, ev->reason);
-
- hci_dev_unlock(hdev);
-}
-
-static inline void hci_disconn_phy_link_complete_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_phys_link_complete *ev = (void *) skb->data;
- struct hci_conn *conn;
-
- BT_DBG("%s status %d", hdev->name, ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (conn) {
- conn->state = BT_CLOSED;
-
- hci_proto_disconn_cfm(conn, ev->reason, 0);
- hci_conn_del(conn);
- }
-
- hci_dev_unlock(hdev);
+ amp_read_loc_assoc_final_data(hdev, hcon);
}
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3521,10 +3700,27 @@
struct hci_event_hdr *hdr = (void *) skb->data;
__u8 event = hdr->evt;
- BT_DBG("");
+ hci_dev_lock(hdev);
+
+ /* Received events are (currently) only needed when a request is
+ * ongoing so avoid unnecessary memory allocation.
+ */
+ if (hdev->req_status == HCI_REQ_PEND) {
+ kfree_skb(hdev->recv_evt);
+ hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
+ }
+
+ hci_dev_unlock(hdev);
skb_pull(skb, HCI_EVENT_HDR_SIZE);
+ if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
+ struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(hdr->opcode);
+
+ hci_req_cmd_complete(hdev, opcode, 0);
+ }
+
switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
@@ -3566,14 +3762,6 @@
hci_remote_features_evt(hdev, skb);
break;
- case HCI_EV_REMOTE_VERSION:
- hci_remote_version_evt(hdev, skb);
- break;
-
- case HCI_EV_QOS_SETUP_COMPLETE:
- hci_qos_setup_complete_evt(hdev, skb);
- break;
-
case HCI_EV_CMD_COMPLETE:
hci_cmd_complete_evt(hdev, skb);
break;
@@ -3630,18 +3818,14 @@
hci_sync_conn_complete_evt(hdev, skb);
break;
- case HCI_EV_SYNC_CONN_CHANGED:
- hci_sync_conn_changed_evt(hdev, skb);
- break;
-
- case HCI_EV_SNIFF_SUBRATE:
- hci_sniff_subrate_evt(hdev, skb);
- break;
-
case HCI_EV_EXTENDED_INQUIRY_RESULT:
hci_extended_inquiry_result_evt(hdev, skb);
break;
+ case HCI_EV_KEY_REFRESH_COMPLETE:
+ hci_key_refresh_complete_evt(hdev, skb);
+ break;
+
case HCI_EV_IO_CAPA_REQUEST:
hci_io_capa_request_evt(hdev, skb);
break;
@@ -3650,10 +3834,20 @@
hci_io_capa_reply_evt(hdev, skb);
break;
- case HCI_EV_USER_PASSKEY_REQUEST:
- case HCI_EV_USER_PASSKEY_NOTIFICATION:
case HCI_EV_USER_CONFIRM_REQUEST:
- hci_user_ssp_confirmation_evt(hdev, event, skb);
+ hci_user_confirm_request_evt(hdev, skb);
+ break;
+
+ case HCI_EV_USER_PASSKEY_REQUEST:
+ hci_user_passkey_request_evt(hdev, skb);
+ break;
+
+ case HCI_EV_USER_PASSKEY_NOTIFY:
+ hci_user_passkey_notify_evt(hdev, skb);
+ break;
+
+ case HCI_EV_KEYPRESS_NOTIFY:
+ hci_keypress_notify_evt(hdev, skb);
break;
case HCI_EV_SIMPLE_PAIR_COMPLETE:
@@ -3668,77 +3862,39 @@
hci_le_meta_evt(hdev, skb);
break;
+ case HCI_EV_CHANNEL_SELECTED:
+ hci_chan_selected_evt(hdev, skb);
+ break;
+
case HCI_EV_REMOTE_OOB_DATA_REQUEST:
hci_remote_oob_data_request_evt(hdev, skb);
break;
- case HCI_EV_PHYS_LINK_COMPLETE:
- hci_phy_link_complete(hdev, skb);
- hci_amp_event_packet(hdev, event, skb);
+ case HCI_EV_PHY_LINK_COMPLETE:
+ hci_phy_link_complete_evt(hdev, skb);
break;
- case HCI_EV_LOG_LINK_COMPLETE:
- hci_log_link_complete(hdev, skb);
+ case HCI_EV_LOGICAL_LINK_COMPLETE:
+ hci_loglink_complete_evt(hdev, skb);
break;
- case HCI_EV_FLOW_SPEC_MODIFY_COMPLETE:
- hci_flow_spec_modify_complete(hdev, skb);
+ case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
+ hci_disconn_loglink_complete_evt(hdev, skb);
break;
- case HCI_EV_DISCONN_LOG_LINK_COMPLETE:
- hci_disconn_log_link_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_DISCONN_PHYS_LINK_COMPLETE:
- hci_disconn_phy_link_complete_evt(hdev, skb);
- hci_amp_event_packet(hdev, event, skb);
+ case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
+ hci_disconn_phylink_complete_evt(hdev, skb);
break;
case HCI_EV_NUM_COMP_BLOCKS:
hci_num_comp_blocks_evt(hdev, skb);
break;
- case HCI_EV_CHANNEL_SELECTED:
- hci_amp_event_packet(hdev, event, skb);
- break;
-
- case HCI_EV_AMP_STATUS_CHANGE:
- hci_amp_event_packet(hdev, event, skb);
- break;
-
default:
- BT_DBG("%s event 0x%x", hdev->name, event);
+ BT_DBG("%s event 0x%2.2x", hdev->name, event);
break;
}
kfree_skb(skb);
hdev->stat.evt_rx++;
}
-
-/* Generate internal stack event */
-void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
-{
- struct hci_event_hdr *hdr;
- struct hci_ev_stack_internal *ev;
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
- if (!skb)
- return;
-
- hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
- hdr->evt = HCI_EV_STACK_INTERNAL;
- hdr->plen = sizeof(*ev) + dlen;
-
- ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
- ev->type = type;
- memcpy(ev->data, data, dlen);
-
- bt_cb(skb)->incoming = 1;
- __net_timestamp(skb);
-
- bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
- skb->dev = (void *) hdev;
- hci_send_to_sock(hdev, skb, NULL);
- kfree_skb(skb);
-}
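Note for reviewers: the extended inquiry result handler above now uses eir_has_data_type() and eir_get_length() to decide whether the remote name is already known before requesting it. The sketch below shows how such an EIR buffer is walked as length/type/value records; it is illustrative only and not part of the patch, and the helper name eir_contains_type(), the sample buffer and the 0x09 (Complete Local Name) type value are assumptions made purely for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch, not part of the patch: walk an EIR buffer made of
 * records of the form [length][type][data...], length counting type+data. */
static bool eir_contains_type(const uint8_t *eir, size_t eir_len, uint8_t type)
{
	size_t offset = 0;

	while (offset < eir_len) {
		uint8_t field_len = eir[offset];

		/* A zero-length field marks the end of significant data */
		if (field_len == 0)
			break;

		/* Guard against a record that runs past the buffer */
		if (offset + field_len + 1 > eir_len)
			break;

		if (eir[offset + 1] == type)
			return true;

		offset += field_len + 1;
	}

	return false;
}

int main(void)
{
	/* Sample EIR: one "Complete Local Name" (0x09) record carrying "BT" */
	const uint8_t eir[] = { 0x03, 0x09, 'B', 'T', 0x00 };

	return eir_contains_type(eir, sizeof(eir), 0x09) ? 0 : 1;
}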
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 95ccb21..f7d08c4 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2011, The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -24,32 +24,14 @@
/* Bluetooth HCI sockets. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_mon.h>
-static bool enable_mgmt = 1;
+static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
@@ -85,22 +67,20 @@
};
/* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
- struct sock *skip_sk)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
struct sock *sk;
+ struct sk_buff *skb_copy = NULL;
struct hlist_node *node;
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
+
sk_for_each(sk, node, &hci_sk_list.head) {
struct hci_filter *flt;
struct sk_buff *nskb;
- if (sk == skip_sk)
- continue;
-
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
@@ -108,21 +88,19 @@
if (skb->sk == sk)
continue;
- if (bt_cb(skb)->channel != hci_pi(sk)->channel)
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
continue;
- if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
- goto clone;
-
/* Apply filter */
flt = &hci_pi(sk)->filter;
if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
+ 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
+ &flt->type_mask))
continue;
if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
- register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+ int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
if (!hci_test_bit(evt, &flt->event_mask))
continue;
@@ -137,19 +115,302 @@
continue;
}
-clone:
- nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb_copy) {
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
+ if (!skb_copy)
+ continue;
+
+ /* Put type byte before the data */
+ memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
+ }
+
+ nskb = skb_clone(skb_copy, GFP_ATOMIC);
if (!nskb)
continue;
- /* Put type byte before the data */
- if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
- memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
-
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
+
read_unlock(&hci_sk_list.lock);
+
+ kfree_skb(skb_copy);
+}
+
+/* Send frame to control socket */
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ /* Skip the original socket */
+ if (sk == skip_sk)
+ continue;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
+/* Send frame to monitor socket */
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct sock *sk;
+ struct sk_buff *skb_copy = NULL;
+ __le16 opcode;
+ struct hlist_node *node;
+
+ if (!atomic_read(&monitor_promisc))
+ return;
+
+ BT_DBG("hdev %p len %d", hdev, skb->len);
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
+ break;
+ case HCI_EVENT_PKT:
+ opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
+ break;
+ case HCI_ACLDATA_PKT:
+ if (bt_cb(skb)->incoming)
+ opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
+ else
+ opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
+ break;
+ case HCI_SCODATA_PKT:
+ if (bt_cb(skb)->incoming)
+ opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
+ else
+ opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
+ break;
+ default:
+ return;
+ }
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ if (!skb_copy) {
+ struct hci_mon_hdr *hdr;
+
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
+ GFP_ATOMIC);
+ if (!skb_copy)
+ continue;
+
+ /* Put header before the data */
+ hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len);
+ }
+
+ nskb = skb_clone(skb_copy, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+
+ kfree_skb(skb_copy);
+}
+
+static void send_monitor_event(struct sk_buff *skb)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
+static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+{
+ struct hci_mon_hdr *hdr;
+ struct hci_mon_new_index *ni;
+ struct sk_buff *skb;
+ __le16 opcode;
+
+ switch (event) {
+ case HCI_DEV_REG:
+ skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+ ni->type = hdev->dev_type;
+ ni->bus = hdev->bus;
+ bacpy(&ni->bdaddr, &hdev->bdaddr);
+ memcpy(ni->name, hdev->name, 8);
+
+ opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
+ break;
+
+ case HCI_DEV_UNREG:
+ skb = bt_skb_alloc(0, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
+ break;
+
+ default:
+ return NULL;
+ }
+
+ __net_timestamp(skb);
+
+ hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
+static void send_monitor_replay(struct sock *sk)
+{
+ struct hci_dev *hdev;
+
+ read_lock(&hci_dev_list_lock);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ struct sk_buff *skb;
+
+ skb = create_monitor_event(hdev, HCI_DEV_REG);
+ if (!skb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, skb))
+ kfree_skb(skb);
+ }
+
+ read_unlock(&hci_dev_list_lock);
+}
+
+/* Generate internal stack event */
+static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
+{
+ struct hci_event_hdr *hdr;
+ struct hci_ev_stack_internal *ev;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
+ hdr->evt = HCI_EV_STACK_INTERNAL;
+ hdr->plen = sizeof(*ev) + dlen;
+
+ ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
+ ev->type = type;
+ memcpy(ev->data, data, dlen);
+
+ bt_cb(skb)->incoming = 1;
+ __net_timestamp(skb);
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ skb->dev = (void *) hdev;
+ hci_send_to_sock(hdev, skb);
+ kfree_skb(skb);
+}
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event)
+{
+ struct hci_ev_si_device ev;
+ struct hlist_node *node;
+
+ BT_DBG("hdev %s event %d", hdev->name, event);
+
+ /* Send event to monitor */
+ if (atomic_read(&monitor_promisc)) {
+ struct sk_buff *skb;
+
+ skb = create_monitor_event(hdev, event);
+ if (skb) {
+ send_monitor_event(skb);
+ kfree_skb(skb);
+ }
+ }
+
+ /* Send event to sockets */
+ ev.event = event;
+ ev.dev_id = hdev->id;
+ hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+
+ if (event == HCI_DEV_UNREG) {
+ struct sock *sk;
+
+ /* Detach sockets from device */
+ read_lock(&hci_sk_list.lock);
+ sk_for_each(sk, node, &hci_sk_list.head) {
+ bh_lock_sock_nested(sk);
+ if (hci_pi(sk)->hdev == hdev) {
+ hci_pi(sk)->hdev = NULL;
+ sk->sk_err = EPIPE;
+ sk->sk_state = BT_OPEN;
+ sk->sk_state_change(sk);
+
+ hci_dev_put(hdev);
+ }
+ bh_unlock_sock(sk);
+ }
+ read_unlock(&hci_sk_list.lock);
+ }
}
static int hci_sock_release(struct socket *sock)
@@ -164,6 +425,9 @@
hdev = hci_pi(sk)->hdev;
+ if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
+ atomic_dec(&monitor_promisc);
+
bt_sock_unlink(&hci_sk_list, sk);
if (hdev) {
@@ -180,86 +444,43 @@
return 0;
}
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct list_head *p;
-
- list_for_each(p, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
-
- if (bacmp(bdaddr, &b->bdaddr) == 0)
- return b;
- }
-
- return NULL;
-}
-
-static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
- struct bdaddr_list *entry;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- if (bacmp(&bdaddr, BDADDR_ANY) == 0)
- return -EBADF;
+ hci_dev_lock(hdev);
- if (hci_blacklist_lookup(hdev, &bdaddr))
- return -EEXIST;
+ err = hci_blacklist_add(hdev, &bdaddr, 0);
- entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ hci_dev_unlock(hdev);
- bacpy(&entry->bdaddr, &bdaddr);
-
- list_add(&entry->list, &hdev->blacklist);
-
- return 0;
+ return err;
}
-int hci_blacklist_clear(struct hci_dev *hdev)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
-
- list_del(p);
- kfree(b);
- }
-
- return 0;
-}
-
-static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
- struct bdaddr_list *entry;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- if (bacmp(&bdaddr, BDADDR_ANY) == 0)
- return hci_blacklist_clear(hdev);
+ hci_dev_lock(hdev);
- entry = hci_blacklist_lookup(hdev, &bdaddr);
- if (!entry)
- return -ENOENT;
+ err = hci_blacklist_del(hdev, &bdaddr, 0);
- list_del(&entry->list);
- kfree(entry);
+ hci_dev_unlock(hdev);
- return 0;
+ return err;
}
/* Ioctls that require bound socket */
-static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+ unsigned long arg)
{
struct hci_dev *hdev = hci_pi(sk)->hdev;
@@ -269,7 +490,7 @@
switch (cmd) {
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return -EPERM;
@@ -289,16 +510,13 @@
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return hci_blacklist_add(hdev, (void __user *) arg);
+ return -EPERM;
+ return hci_sock_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return hci_blacklist_del(hdev, (void __user *) arg);
-
- case HCISETAUTHINFO:
- return hci_set_auth_info(hdev, (void __user *) arg);
+ return -EPERM;
+ return hci_sock_blacklist_del(hdev, (void __user *) arg);
default:
if (hdev->ioctl)
@@ -307,7 +525,8 @@
}
}
-static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
@@ -327,27 +546,22 @@
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
-
- err = hci_dev_open(arg);
- if (!err || err == -EALREADY)
- return 0;
- else
- return err;
+ return -EPERM;
+ return hci_dev_open(arg);
case HCIDEVDOWN:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_close(arg);
case HCIDEVRESET:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_reset(arg);
case HCIDEVRESTAT:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_reset_stat(arg);
case HCISETSCAN:
@@ -359,7 +573,7 @@
case HCISETACLMTU:
case HCISETSCOMTU:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_cmd(cmd, argp);
case HCIINQUIRY:
@@ -373,7 +587,8 @@
}
}
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
{
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
@@ -392,31 +607,69 @@
if (haddr.hci_family != AF_BLUETOOTH)
return -EINVAL;
- if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
- return -EINVAL;
-
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
- return -EINVAL;
-
lock_sock(sk);
- if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
+ if (sk->sk_state == BT_BOUND) {
err = -EALREADY;
goto done;
}
- if (haddr.hci_dev != HCI_DEV_NONE) {
- hdev = hci_dev_get(haddr.hci_dev);
- if (!hdev) {
- err = -ENODEV;
+ switch (haddr.hci_channel) {
+ case HCI_CHANNEL_RAW:
+ if (hci_pi(sk)->hdev) {
+ err = -EALREADY;
goto done;
}
- atomic_inc(&hdev->promisc);
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ hdev = hci_dev_get(haddr.hci_dev);
+ if (!hdev) {
+ err = -ENODEV;
+ goto done;
+ }
+
+ atomic_inc(&hdev->promisc);
+ }
+
+ hci_pi(sk)->hdev = hdev;
+ break;
+
+ case HCI_CHANNEL_CONTROL:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ break;
+
+ case HCI_CHANNEL_MONITOR:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_RAW)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ send_monitor_replay(sk);
+
+ atomic_inc(&monitor_promisc);
+ break;
+
+ default:
+ err = -EINVAL;
+ goto done;
}
+
hci_pi(sk)->channel = haddr.hci_channel;
- hci_pi(sk)->hdev = hdev;
sk->sk_state = BT_BOUND;
done:
@@ -424,7 +677,8 @@
return err;
}
-static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *addr_len, int peer)
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
@@ -440,18 +694,21 @@
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hdev->id;
+ haddr->hci_channel = 0;
release_sock(sk);
return 0;
}
-static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming;
- put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
+ &incoming);
}
if (mask & HCI_CMSG_TSTAMP) {
@@ -467,7 +724,8 @@
data = &tv;
len = sizeof(tv);
#ifdef CONFIG_COMPAT
- if (msg->msg_flags & MSG_CMSG_COMPAT) {
+ if (!COMPAT_USE_64BIT_TIME &&
+ (msg->msg_flags & MSG_CMSG_COMPAT)) {
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
data = &ctv;
@@ -480,7 +738,7 @@
}
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+ struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -508,7 +766,15 @@
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- hci_sock_cmsg(sk, msg, skb);
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ hci_sock_cmsg(sk, msg, skb);
+ break;
+ case HCI_CHANNEL_CONTROL:
+ case HCI_CHANNEL_MONITOR:
+ sock_recv_timestamp(msg, sk, skb);
+ break;
+ }
skb_free_datagram(sk, skb);
@@ -521,7 +787,6 @@
struct sock *sk = sock->sk;
struct hci_dev *hdev;
struct sk_buff *skb;
- int reserve = 0;
int err;
BT_DBG("sock %p sk %p", sock, sk);
@@ -543,6 +808,9 @@
case HCI_CHANNEL_CONTROL:
err = mgmt_control(sk, msg, len);
goto done;
+ case HCI_CHANNEL_MONITOR:
+ err = -EOPNOTSUPP;
+ goto done;
default:
err = -EINVAL;
goto done;
@@ -559,18 +827,10 @@
goto done;
}
- /* Allocate extra headroom for Qualcomm PAL */
- if (hdev->dev_type == HCI_AMP && hdev->manufacturer == 0x001d)
- reserve = BT_SKB_RESERVE_80211;
-
- skb = bt_skb_send_alloc(sk, len + reserve,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
goto done;
- if (reserve)
- skb_reserve(skb, reserve);
-
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto drop;
@@ -586,18 +846,24 @@
u16 ocf = hci_opcode_ocf(opcode);
if (((ogf > HCI_SFLT_MAX_OGF) ||
- !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
- !capable(CAP_NET_RAW)) {
+ !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
+ &hci_sec_filter.ocf_mask[ogf])) &&
+ !capable(CAP_NET_RAW)) {
err = -EPERM;
goto drop;
}
if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
} else {
+ /* Stand-alone HCI commands must be flagged as
+ * single-command requests.
+ */
+ bt_cb(skb)->req.start = true;
+
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
} else {
if (!capable(CAP_NET_RAW)) {
@@ -606,7 +872,7 @@
}
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
err = len;
@@ -620,7 +886,8 @@
goto done;
}
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -630,6 +897,11 @@
lock_sock(sk);
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (optname) {
case HCI_DATA_DIR:
if (get_user(opt, (int __user *)optval)) {
@@ -692,19 +964,30 @@
break;
}
+done:
release_sock(sk);
return err;
}
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
- int len, opt;
+ int len, opt, err = 0;
+
+ BT_DBG("sk %p, opt %d", sk, optname);
if (get_user(len, optlen))
return -EFAULT;
+ lock_sock(sk);
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (optname) {
case HCI_DATA_DIR:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -713,7 +996,7 @@
opt = 0;
if (put_user(opt, optval))
- return -EFAULT;
+ err = -EFAULT;
break;
case HCI_TIME_STAMP:
@@ -723,13 +1006,14 @@
opt = 0;
if (put_user(opt, optval))
- return -EFAULT;
+ err = -EFAULT;
break;
case HCI_FILTER:
{
struct hci_filter *f = &hci_pi(sk)->filter;
+ memset(&uf, 0, sizeof(uf));
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
@@ -738,15 +1022,17 @@
len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
- return -EFAULT;
+ err = -EFAULT;
break;
default:
- return -ENOPROTOOPT;
+ err = -ENOPROTOOPT;
break;
}
- return 0;
+done:
+ release_sock(sk);
+ return err;
}
static const struct proto_ops hci_sock_ops = {
@@ -804,54 +1090,12 @@
return 0;
}
-static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct hci_dev *hdev = (struct hci_dev *) ptr;
- struct hci_ev_si_device ev;
-
- BT_DBG("hdev %s event %ld", hdev->name, event);
-
- /* Send event to sockets */
- ev.event = event;
- ev.dev_id = hdev->id;
- hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
-
- if (event == HCI_DEV_UNREG) {
- struct sock *sk;
- struct hlist_node *node;
-
- /* Detach sockets from device */
- read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
- local_bh_disable();
- bh_lock_sock_nested(sk);
- if (hci_pi(sk)->hdev == hdev) {
- hci_pi(sk)->hdev = NULL;
- sk->sk_err = EPIPE;
- sk->sk_state = BT_OPEN;
- sk->sk_state_change(sk);
-
- hci_dev_put(hdev);
- }
- bh_unlock_sock(sk);
- local_bh_enable();
- }
- read_unlock(&hci_sk_list.lock);
- }
-
- return NOTIFY_DONE;
-}
-
static const struct net_proto_family hci_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = hci_sock_create,
};
-static struct notifier_block hci_sock_nblock = {
- .notifier_call = hci_sock_dev_event
-};
-
int __init hci_sock_init(void)
{
int err;
@@ -861,30 +1105,30 @@
return err;
err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("HCI socket registration failed");
goto error;
+ }
- hci_register_notifier(&hci_sock_nblock);
+ err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create HCI proc file");
+ bt_sock_unregister(BTPROTO_HCI);
+ goto error;
+ }
BT_INFO("HCI socket layer initialized");
return 0;
error:
- BT_ERR("HCI socket registration failed");
proto_unregister(&hci_sk_proto);
return err;
}
void hci_sock_cleanup(void)
{
- if (bt_sock_unregister(BTPROTO_HCI) < 0)
- BT_ERR("HCI socket unregistration failed");
-
- hci_unregister_notifier(&hci_sock_nblock);
-
+ bt_procfs_cleanup(&init_net, "hci");
+ bt_sock_unregister(BTPROTO_HCI);
proto_unregister(&hci_sk_proto);
}
-
-module_param(enable_mgmt, bool, 0644);
-MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
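Note for reviewers: the HCI_CHANNEL_MONITOR bind path added above is what tools such as btmon attach to. The sketch below is a minimal monitor client; it is illustrative only and not part of the patch. The sockaddr_hci layout and the constants (BTPROTO_HCI = 1, HCI_DEV_NONE = 0xffff, HCI_CHANNEL_MONITOR = 2) are assumptions mirroring the kernel headers, and binding requires CAP_NET_RAW per the capability check added in hci_sock_bind().

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

/* Assumed to mirror the kernel/BlueZ definitions; sketch only */
#define BTPROTO_HCI		1
#define HCI_DEV_NONE		0xffff
#define HCI_CHANNEL_MONITOR	2

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr;
	int fd;

	/* Raw HCI socket; BTPROTO_HCI selects the HCI member of AF_BLUETOOTH */
	fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;	/* monitor is not tied to one controller */
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	/* On success hci_sock_bind() calls send_monitor_replay(), so the first
	 * frames read back are HCI_MON_NEW_INDEX records for every registered
	 * controller. */
	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		perror("bind");	/* EPERM without CAP_NET_RAW */
		close(fd);
		return 1;
	}

	/* Each subsequent read() returns a struct hci_mon_hdr followed by the
	 * captured packet, as queued by hci_send_to_monitor() above. */
	close(fd);
	return 0;
}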
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 255419e..7ad6ecf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,12 +1,8 @@
/* Bluetooth HCI driver model support. */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -25,32 +21,37 @@
return "SCO";
case ESCO_LINK:
return "eSCO";
+ case LE_LINK:
+ return "LE";
default:
return "UNKNOWN";
}
}
-static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
+ struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
-static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", batostr(&conn->dst));
+ struct hci_conn *conn = to_hci_conn(dev);
+ return sprintf(buf, "%pMR\n", &conn->dst);
}
-static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_features(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
+ struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- conn->features[0], conn->features[1],
- conn->features[2], conn->features[3],
- conn->features[4], conn->features[5],
- conn->features[6], conn->features[7]);
+ conn->features[0][0], conn->features[0][1],
+ conn->features[0][2], conn->features[0][3],
+ conn->features[0][4], conn->features[0][5],
+ conn->features[0][6], conn->features[0][7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
@@ -78,8 +79,8 @@
static void bt_link_release(struct device *dev)
{
- void *data = dev_get_drvdata(dev);
- kfree(data);
+ struct hci_conn *conn = to_hci_conn(dev);
+ kfree(conn);
}
static struct device_type bt_link = {
@@ -88,23 +89,6 @@
.release = bt_link_release,
};
-static void add_conn(struct work_struct *work)
-{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
- struct hci_dev *hdev = conn->hdev;
-
- dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
- dev_set_drvdata(&conn->dev, conn);
-
- if (device_add(&conn->dev) < 0) {
- BT_ERR("Failed to register connection device");
- return;
- }
-
- hci_dev_hold(hdev);
-}
-
/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
@@ -115,9 +99,37 @@
return !strncmp(dev_name(dev), "rfcomm", 6);
}
-static void del_conn(struct work_struct *work)
+void hci_conn_init_sysfs(struct hci_conn *conn)
{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = bt_class;
+ conn->dev.parent = &hdev->dev;
+
+ device_initialize(&conn->dev);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+ if (device_add(&conn->dev) < 0) {
+ BT_ERR("Failed to register connection device");
+ return;
+ }
+
+ hci_dev_hold(hdev);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
struct hci_dev *hdev = conn->hdev;
if (!device_is_registered(&conn->dev))
@@ -134,42 +146,10 @@
}
device_del(&conn->dev);
- put_device(&conn->dev);
hci_dev_put(hdev);
}
-void hci_conn_init_sysfs(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- conn->dev.type = &bt_link;
- conn->dev.class = bt_class;
- conn->dev.parent = &hdev->dev;
-
- device_initialize(&conn->dev);
-
- INIT_WORK(&conn->work_add, add_conn);
- INIT_WORK(&conn->work_del, del_conn);
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- queue_work(conn->hdev->workqueue, &conn->work_add);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- if (conn->hdev)
- queue_work(conn->hdev->workqueue, &conn->work_del);
-}
-
static inline char *host_bustostr(int bus)
{
switch (bus) {
@@ -204,21 +184,24 @@
}
}
-static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_bus(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
-static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
int i;
@@ -229,57 +212,66 @@
return sprintf(buf, "%s\n", name);
}
-static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_class(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "0x%.2x%.2x%.2x\n",
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ struct hci_dev *hdev = to_hci_dev(dev);
+ return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
}
-static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
+ struct hci_dev *hdev = to_hci_dev(dev);
+ return sprintf(buf, "%pMR\n", &hdev->bdaddr);
}
-static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_features(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ hdev->features[0][0], hdev->features[0][1],
+ hdev->features[0][2], hdev->features[0][3],
+ hdev->features[0][4], hdev->features[0][5],
+ hdev->features[0][6], hdev->features[0][7]);
}
-static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_manufacturer(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
-static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
-static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_revision(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
-static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_idle_timeout(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
-static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_idle_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
unsigned int val;
int rv;
@@ -295,15 +287,18 @@
return count;
}
-static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_max_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
-static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_max_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
int rv;
@@ -319,15 +314,18 @@
return count;
}
-static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_min_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
-static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_min_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
int rv;
@@ -354,11 +352,11 @@
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
- show_idle_timeout, store_idle_timeout);
+ show_idle_timeout, store_idle_timeout);
static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
- show_sniff_max_interval, store_sniff_max_interval);
+ show_sniff_max_interval, store_sniff_max_interval);
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
- show_sniff_min_interval, store_sniff_min_interval);
+ show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
&dev_attr_bus.attr,
@@ -387,8 +385,9 @@
static void bt_host_release(struct device *dev)
{
- void *data = dev_get_drvdata(dev);
- kfree(data);
+ struct hci_dev *hdev = to_hci_dev(dev);
+ kfree(hdev);
+ module_put(THIS_MODULE);
}
static struct device_type bt_host = {
@@ -400,15 +399,15 @@
static int inquiry_cache_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- for (e = cache->list; e; e = e->next) {
+ list_for_each_entry(e, &cache->all, all) {
struct inquiry_data *data = &e->data;
- seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- batostr(&data->bdaddr),
+ seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ &data->bdaddr,
data->pscan_rep_mode, data->pscan_period_mode,
data->pscan_mode, data->dev_class[2],
data->dev_class[1], data->dev_class[0],
@@ -416,7 +415,7 @@
data->rssi, data->ssp_mode, e->timestamp);
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -436,19 +435,14 @@
static int blacklist_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
+ struct bdaddr_list *b;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- list_for_each(l, &hdev->blacklist) {
- struct bdaddr_list *b;
+ list_for_each_entry(b, &hdev->blacklist, list)
+ seq_printf(f, "%pMR\n", &b->bdaddr);
- b = list_entry(l, struct bdaddr_list, list);
-
- seq_printf(f, "%s\n", batostr(&b->bdaddr));
- }
-
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -467,37 +461,31 @@
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
- u32 data0, data4;
- u16 data1, data2, data3, data5;
+ u32 data0, data5;
+ u16 data1, data2, data3, data4;
- memcpy(&data0, &uuid[0], 4);
- memcpy(&data1, &uuid[4], 2);
- memcpy(&data2, &uuid[6], 2);
- memcpy(&data3, &uuid[8], 2);
- memcpy(&data4, &uuid[10], 4);
- memcpy(&data5, &uuid[14], 2);
+ data5 = get_unaligned_le32(uuid);
+ data4 = get_unaligned_le16(uuid + 4);
+ data3 = get_unaligned_le16(uuid + 6);
+ data2 = get_unaligned_le16(uuid + 8);
+ data1 = get_unaligned_le16(uuid + 10);
+ data0 = get_unaligned_le32(uuid + 12);
- seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
- ntohl(data0), ntohs(data1), ntohs(data2),
- ntohs(data3), ntohl(data4), ntohs(data5));
+ seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
+ data0, data1, data2, data3, data4, data5);
}
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
+ struct bt_uuid *uuid;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- list_for_each(l, &hdev->uuids) {
- struct bt_uuid *uuid;
-
- uuid = list_entry(l, struct bt_uuid, list);
-
+ list_for_each_entry(uuid, &hdev->uuids, list)
print_bt_uuid(f, uuid->uuid);
- }
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -514,22 +502,56 @@
.release = single_release,
};
-int hci_register_sysfs(struct hci_dev *hdev)
+static int auto_accept_delay_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+
+ hdev->auto_accept_delay = val;
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+
+ *val = hdev->auto_accept_delay;
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+ auto_accept_delay_set, "%llu\n");
+
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+ struct device *dev = &hdev->dev;
+
+ dev->type = &bt_host;
+ dev->class = bt_class;
+
+ __module_get(THIS_MODULE);
+ device_initialize(dev);
+}
+
+int hci_add_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->type = &bt_host;
- dev->class = bt_class;
- dev->parent = hdev->parent;
-
dev_set_name(dev, "%s", hdev->name);
- dev_set_drvdata(dev, hdev);
-
- err = device_register(dev);
+ err = device_add(dev);
if (err < 0)
return err;
@@ -541,17 +563,19 @@
return 0;
debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
+ hdev, &inquiry_cache_fops);
debugfs_create_file("blacklist", 0444, hdev->debugfs,
- hdev, &blacklist_fops);
+ hdev, &blacklist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+ debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
+ &auto_accept_delay_fops);
return 0;
}
-void hci_unregister_sysfs(struct hci_dev *hdev)
+void hci_del_sysfs(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
@@ -565,10 +589,8 @@
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
bt_class = class_create(THIS_MODULE, "bluetooth");
- if (IS_ERR(bt_class))
- return PTR_ERR(bt_class);
- return 0;
+ return PTR_RET(bt_class);
}
void bt_sysfs_cleanup(void)
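The hci_sysfs.c hunks above replace dev_get_drvdata() with to_hci_dev() because struct hci_dev now embeds its struct device instead of attaching itself as driver data. A minimal sketch of that embedded-device pattern follows; the example_* names are placeholders and to_hci_dev() is assumed to be a container_of() wrapper like the one shown:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Stand-in for struct hci_dev; only the embedded device matters here. */
struct example_hdev {
        struct device dev;              /* embedded, not a drvdata pointer */
        /* ... driver state ... */
};

/* Assumed shape of to_hci_dev(): recover the container from the member. */
#define to_example_hdev(d) container_of((d), struct example_hdev, dev)

static void example_release(struct device *dev)
{
        /* Freeing the container also frees the embedded struct device,
         * which is why bt_host_release() above can simply kfree() the hdev. */
        kfree(to_example_hdev(dev));
}

The container must be heap-allocated as a whole so the release callback can free it in one shot, which is exactly what the new bt_host_release() relies on.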
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 4deaca7..9332bc7 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
config BT_HIDP
tristate "HIDP protocol support"
- depends on BT && INPUT && HID_SUPPORT
+ depends on BT && INPUT
select HID
help
HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index a8db484..de030f5 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1,7 +1,7 @@
/*
HIDP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>
- Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
+ Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -21,27 +21,10 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/kref.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/freezer.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <net/sock.h>
-
-#include <linux/input.h>
-#include <linux/hid.h>
+#include <linux/kthread.h>
#include <linux/hidraw.h>
#include <net/bluetooth/bluetooth.h>
@@ -78,56 +61,20 @@
static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
-static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
-{
- struct hidp_session *session;
- struct list_head *p;
+static int hidp_session_probe(struct l2cap_conn *conn,
+ struct l2cap_user *user);
+static void hidp_session_remove(struct l2cap_conn *conn,
+ struct l2cap_user *user);
+static int hidp_session_thread(void *arg);
+static void hidp_session_terminate(struct hidp_session *s);
- BT_DBG("");
-
- list_for_each(p, &hidp_session_list) {
- session = list_entry(p, struct hidp_session, list);
- if (!bacmp(bdaddr, &session->bdaddr))
- return session;
- }
- return NULL;
-}
-
-static void __hidp_link_session(struct hidp_session *session)
-{
- __module_get(THIS_MODULE);
- list_add(&session->list, &hidp_session_list);
-}
-
-static void __hidp_unlink_session(struct hidp_session *session)
-{
- bdaddr_t *dst = &session->bdaddr;
- struct hci_dev *hdev;
- struct device *dev = NULL;
-
- hdev = hci_get_route(dst, BDADDR_ANY);
- if (hdev) {
- session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (session->conn && session->conn->hidp_session_valid)
- dev = &session->conn->dev;
-
- hci_dev_put(hdev);
- }
-
- if (dev)
- hci_conn_put_device(session->conn);
-
- list_del(&session->list);
- module_put(THIS_MODULE);
-}
-
-static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
+static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
ci->flags = session->flags;
- ci->state = session->state;
+ ci->state = BT_CONNECTED;
ci->vendor = 0x0000;
ci->product = 0x0000;
@@ -151,13 +98,60 @@
}
}
-static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+/* assemble skb, queue message on @transmit and wake up the session thread */
+static int hidp_send_message(struct hidp_session *session, struct socket *sock,
+ struct sk_buff_head *transmit, unsigned char hdr,
+ const unsigned char *data, int size)
{
- unsigned char newleds;
struct sk_buff *skb;
+ struct sock *sk = sock->sk;
- BT_DBG("session %p type %d code %d value %d", session, type, code, value);
+ BT_DBG("session %p data %p size %d", session, data, size);
+
+ if (atomic_read(&session->terminate))
+ return -EIO;
+
+ skb = alloc_skb(size + 1, GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("Can't allocate memory for new frame");
+ return -ENOMEM;
+ }
+
+ *skb_put(skb, 1) = hdr;
+ if (data && size > 0)
+ memcpy(skb_put(skb, size), data, size);
+
+ skb_queue_tail(transmit, skb);
+ wake_up_interruptible(sk_sleep(sk));
+
+ return 0;
+}
+
+static int hidp_send_ctrl_message(struct hidp_session *session,
+ unsigned char hdr, const unsigned char *data,
+ int size)
+{
+ return hidp_send_message(session, session->ctrl_sock,
+ &session->ctrl_transmit, hdr, data, size);
+}
+
+static int hidp_send_intr_message(struct hidp_session *session,
+ unsigned char hdr, const unsigned char *data,
+ int size)
+{
+ return hidp_send_message(session, session->intr_sock,
+ &session->intr_transmit, hdr, data, size);
+}
+
+static int hidp_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct hidp_session *session = input_get_drvdata(dev);
+ unsigned char newleds;
+ unsigned char hdr, data[2];
+
+ BT_DBG("session %p type %d code %d value %d",
+ session, type, code, value);
if (type != EV_LED)
return -1;
@@ -173,36 +167,11 @@
session->leds = newleds;
- skb = alloc_skb(3, GFP_ATOMIC);
- if (!skb) {
- BT_ERR("Can't allocate memory for new frame");
- return -ENOMEM;
- }
+ hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+ data[0] = 0x01;
+ data[1] = newleds;
- *skb_put(skb, 1) = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
- *skb_put(skb, 1) = 0x01;
- *skb_put(skb, 1) = newleds;
-
- skb_queue_tail(&session->intr_transmit, skb);
-
- hidp_schedule(session);
-
- return 0;
-}
-
-static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
- struct hid_device *hid = input_get_drvdata(dev);
- struct hidp_session *session = hid->driver_data;
-
- return hidp_queue_event(session, dev, type, code, value);
-}
-
-static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
- struct hidp_session *session = input_get_drvdata(dev);
-
- return hidp_queue_event(session, dev, type, code, value);
+ return hidp_send_intr_message(session, hdr, data, 2);
}
static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
@@ -260,104 +229,174 @@
input_sync(dev);
}
-static int __hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
-{
- struct sk_buff *skb;
-
- BT_DBG("session %p data %p size %d", session, data, size);
-
- skb = alloc_skb(size + 1, GFP_ATOMIC);
- if (!skb) {
- BT_ERR("Can't allocate memory for new frame");
- return -ENOMEM;
- }
-
- *skb_put(skb, 1) = hdr;
- if (data && size > 0)
- memcpy(skb_put(skb, size), data, size);
-
- skb_queue_tail(&session->ctrl_transmit, skb);
-
- return 0;
-}
-
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
-{
- int err;
-
- err = __hidp_send_ctrl_message(session, hdr, data, size);
-
- hidp_schedule(session);
-
- return err;
-}
-
-static int hidp_queue_report(struct hidp_session *session,
- unsigned char *data, int size)
-{
- struct sk_buff *skb;
-
- BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
-
- skb = alloc_skb(size + 1, GFP_ATOMIC);
- if (!skb) {
- BT_ERR("Can't allocate memory for new frame");
- return -ENOMEM;
- }
-
- *skb_put(skb, 1) = 0xa2;
- if (size > 0)
- memcpy(skb_put(skb, size), data, size);
-
- skb_queue_tail(&session->intr_transmit, skb);
-
- hidp_schedule(session);
-
- return 0;
-}
-
static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
{
- unsigned char buf[32];
- int rsize;
+ unsigned char hdr;
+ u8 *buf;
+ int rsize, ret;
- rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- if (rsize > sizeof(buf))
+ buf = hid_alloc_report_buf(report, GFP_ATOMIC);
+ if (!buf)
return -EIO;
hid_output_report(report, buf);
+ hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
- return hidp_queue_report(session, buf, rsize);
+ rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ ret = hidp_send_intr_message(session, hdr, buf, rsize);
+
+ kfree(buf);
+ return ret;
}
-static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
+static int hidp_get_raw_report(struct hid_device *hid,
+ unsigned char report_number,
+ unsigned char *data, size_t count,
unsigned char report_type)
{
+ struct hidp_session *session = hid->driver_data;
+ struct sk_buff *skb;
+ size_t len;
+ int numbered_reports = hid->report_enum[report_type].numbered;
+ int ret;
+
+ if (atomic_read(&session->terminate))
+ return -EIO;
+
switch (report_type) {
case HID_FEATURE_REPORT:
- report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ break;
+ case HID_INPUT_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT;
break;
case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT;
break;
default:
return -EINVAL;
}
- if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count))
- return -ENOMEM;
- return count;
+ if (mutex_lock_interruptible(&session->report_mutex))
+ return -ERESTARTSYS;
+
+ /* Set up our wait, and send the report request to the device. */
+ session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK;
+ session->waiting_report_number = numbered_reports ? report_number : -1;
+ set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ data[0] = report_number;
+ ret = hidp_send_ctrl_message(session, report_type, data, 1);
+ if (ret)
+ goto err;
+
+ /* Wait for the return of the report. The returned report
+ gets put in session->report_return. */
+ while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
+ !atomic_read(&session->terminate)) {
+ int res;
+
+ res = wait_event_interruptible_timeout(session->report_queue,
+ !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)
+ || atomic_read(&session->terminate),
+ 5*HZ);
+ if (res == 0) {
+ /* timeout */
+ ret = -EIO;
+ goto err;
+ }
+ if (res < 0) {
+ /* signal */
+ ret = -ERESTARTSYS;
+ goto err;
+ }
+ }
+
+ skb = session->report_return;
+ if (skb) {
+ len = skb->len < count ? skb->len : count;
+ memcpy(data, skb->data, len);
+
+ kfree_skb(skb);
+ session->report_return = NULL;
+ } else {
+ /* Device returned a HANDSHAKE, indicating protocol error. */
+ len = -EIO;
+ }
+
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
+
+ return len;
+
+err:
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return ret;
+}
+
+static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
+ unsigned char report_type)
+{
+ struct hidp_session *session = hid->driver_data;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+ return hidp_send_intr_message(session, report_type,
+ data, count);
+ } else if (report_type != HID_FEATURE_REPORT) {
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&session->report_mutex))
+ return -ERESTARTSYS;
+
+ /* Set up our wait, and send the report request to the device. */
+ set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ ret = hidp_send_ctrl_message(session, report_type, data, count);
+ if (ret)
+ goto err;
+
+ /* Wait for the ACK from the device. */
+ while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) &&
+ !atomic_read(&session->terminate)) {
+ int res;
+
+ res = wait_event_interruptible_timeout(session->report_queue,
+ !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)
+ || atomic_read(&session->terminate),
+ 10*HZ);
+ if (res == 0) {
+ /* timeout */
+ ret = -EIO;
+ goto err;
+ }
+ if (res < 0) {
+ /* signal */
+ ret = -ERESTARTSYS;
+ goto err;
+ }
+ }
+
+ if (!session->output_report_success) {
+ ret = -EIO;
+ goto err;
+ }
+
+ ret = count;
+
+err:
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return ret;
}
static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;
- atomic_inc(&session->terminate);
- hidp_schedule(session);
+ hidp_session_terminate(session);
}
static void hidp_set_timer(struct hidp_session *session)
@@ -366,7 +405,7 @@
mod_timer(&session->timer, jiffies + HZ * session->idle_to);
}
-static inline void hidp_del_timer(struct hidp_session *session)
+static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
del_timer(&session->timer);
@@ -376,16 +415,21 @@
unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
+ session->output_report_success = 0; /* default condition */
switch (param) {
case HIDP_HSHK_SUCCESSFUL:
/* FIXME: Call into SET_ GET_ handlers here */
+ session->output_report_success = 1;
break;
case HIDP_HSHK_NOT_READY:
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
+ if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
+ wake_up_interruptible(&session->report_queue);
+
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -395,15 +439,19 @@
case HIDP_HSHK_ERR_FATAL:
/* Device requests a reboot, as this is the only way this error
* can be recovered. */
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0);
break;
default:
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
break;
}
+
+ /* Wake up the waiting thread. */
+ if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
+ wake_up_interruptible(&session->report_queue);
}
static void hidp_process_hid_control(struct hidp_session *session,
@@ -416,15 +464,15 @@
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- /* Kill session thread */
- atomic_inc(&session->terminate);
- hidp_schedule(session);
+ hidp_session_terminate(session);
}
}
-static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
+/* Returns true if the passed-in skb should be freed by the caller. */
+static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
+ int done_with_skb = 1;
BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
switch (param) {
@@ -436,7 +484,6 @@
if (session->hid)
hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
-
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -445,15 +492,30 @@
break;
default:
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
}
+
+ if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
+ param == session->waiting_report_type) {
+ if (session->waiting_report_number < 0 ||
+ session->waiting_report_number == skb->data[0]) {
+ /* hidp_get_raw_report() is waiting on this report. */
+ session->report_return = skb;
+ done_with_skb = 0;
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+ }
+ }
+
+ return done_with_skb;
}
static void hidp_recv_ctrl_frame(struct hidp_session *session,
struct sk_buff *skb)
{
unsigned char hdr, type, param;
+ int free_skb = 1;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -473,16 +535,17 @@
break;
case HIDP_TRANS_DATA:
- hidp_process_data(session, skb, param);
+ free_skb = hidp_process_data(session, skb, param);
break;
default:
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0);
break;
}
- kfree_skb(skb);
+ if (free_skb)
+ kfree_skb(skb);
}
static void hidp_recv_intr_frame(struct hidp_session *session,
@@ -527,156 +590,37 @@
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
-static void hidp_process_transmit(struct hidp_session *session)
+/* dequeue message from @transmit and send via @sock */
+static void hidp_process_transmit(struct hidp_session *session,
+ struct sk_buff_head *transmit,
+ struct socket *sock)
{
struct sk_buff *skb;
+ int ret;
BT_DBG("session %p", session);
- while ((skb = skb_dequeue(&session->ctrl_transmit))) {
- if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->ctrl_transmit, skb);
+ while ((skb = skb_dequeue(transmit))) {
+ ret = hidp_send_frame(sock, skb->data, skb->len);
+ if (ret == -EAGAIN) {
+ skb_queue_head(transmit, skb);
+ break;
+ } else if (ret < 0) {
+ hidp_session_terminate(session);
+ kfree_skb(skb);
break;
}
hidp_set_timer(session);
kfree_skb(skb);
}
-
- while ((skb = skb_dequeue(&session->intr_transmit))) {
- if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->intr_transmit, skb);
- break;
- }
-
- hidp_set_timer(session);
- kfree_skb(skb);
- }
-}
-
-static int hidp_session(void *arg)
-{
- struct hidp_session *session = arg;
- struct sock *ctrl_sk = session->ctrl_sock->sk;
- struct sock *intr_sk = session->intr_sock->sk;
- struct sk_buff *skb;
- int vendor = 0x0000, product = 0x0000;
- wait_queue_t ctrl_wait, intr_wait;
-
- BT_DBG("session %p", session);
-
- if (session->input) {
- vendor = session->input->id.vendor;
- product = session->input->id.product;
- }
-
- if (session->hid) {
- vendor = session->hid->vendor;
- product = session->hid->product;
- }
-
- daemonize("khidpd_%04x%04x", vendor, product);
- set_user_nice(current, -15);
-
- init_waitqueue_entry(&ctrl_wait, current);
- init_waitqueue_entry(&intr_wait, current);
- add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
- add_wait_queue(sk_sleep(intr_sk), &intr_wait);
- while (!atomic_read(&session->terminate)) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (ctrl_sk->sk_state != BT_CONNECTED ||
- intr_sk->sk_state != BT_CONNECTED)
- break;
-
- while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
- skb_orphan(skb);
- if (!skb_linearize(skb))
- hidp_recv_ctrl_frame(session, skb);
- else
- kfree_skb(skb);
- }
-
- while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
- skb_orphan(skb);
- if (!skb_linearize(skb))
- hidp_recv_intr_frame(session, skb);
- else
- kfree_skb(skb);
- }
-
- hidp_process_transmit(session);
-
- schedule();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
- remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
-
- down_write(&hidp_session_sem);
-
- hidp_del_timer(session);
-
- if (session->input) {
- input_unregister_device(session->input);
- session->input = NULL;
- }
-
- if (session->hid) {
- hid_destroy_device(session->hid);
- session->hid = NULL;
- }
-
- /* Wakeup user-space polling for socket errors */
- session->intr_sock->sk->sk_err = EUNATCH;
- session->ctrl_sock->sk->sk_err = EUNATCH;
-
- hidp_schedule(session);
-
- fput(session->intr_sock->file);
-
- wait_event_timeout(*(sk_sleep(ctrl_sk)),
- (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
-
- fput(session->ctrl_sock->file);
-
- __hidp_unlink_session(session);
-
- up_write(&hidp_session_sem);
-
- kfree(session);
- return 0;
-}
-
-static struct hci_conn *hidp_get_connection(struct hidp_session *session)
-{
- bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
- bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
- struct hci_conn *conn;
- struct hci_dev *hdev;
-
- hdev = hci_get_route(dst, src);
- if (!hdev)
- return NULL;
-
- hci_dev_lock_bh(hdev);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (conn) {
- conn->hidp_session_valid = true;
- hci_conn_hold_device(conn);
- }
- hci_dev_unlock_bh(hdev);
-
- hci_dev_put(hdev);
-
- return conn;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int err, i;
+ int i;
input = input_allocate_device();
if (!input)
@@ -719,16 +663,10 @@
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
- input->dev.parent = &session->conn->dev;
+ input->dev.parent = &session->conn->hcon->dev;
input->event = hidp_input_event;
- err = input_register_device(input);
- if (err < 0) {
- hci_conn_put_device(session->conn);
- return err;
- }
-
return 0;
}
@@ -754,6 +692,9 @@
struct hidp_session *session = hid->driver_data;
struct hid_report *report;
+ if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
+ return 0;
+
list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
report_list, list)
hidp_send_report(session, report);
@@ -781,9 +722,10 @@
.stop = hidp_stop,
.open = hidp_open,
.close = hidp_close,
- .hidinput_input_event = hidp_hidinput_event,
};
+/* This function sets up the hid device. It does not add it
+ to the HID system. That is done in hidp_session_dev_add(). */
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
@@ -816,25 +758,29 @@
hid->version = req->version;
hid->country = req->country;
- strncpy(hid->name, req->name, 128);
- strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
- strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
+ strncpy(hid->name, req->name, sizeof(req->name) - 1);
- hid->dev.parent = &session->conn->dev;
+ snprintf(hid->phys, sizeof(hid->phys), "%pMR",
+ &bt_sk(session->ctrl_sock->sk)->src);
+
+ snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
+ &bt_sk(session->ctrl_sock->sk)->dst);
+
+ hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
+ hid->hid_get_raw_report = hidp_get_raw_report;
hid->hid_output_raw_report = hidp_output_raw_report;
- err = hid_add_device(hid);
- if (err < 0)
- goto failed;
+ /* True if device is blacklisted in drivers/hid/hid-core.c */
+ if (hid_ignore(hid)) {
+ hid_destroy_device(session->hid);
+ session->hid = NULL;
+ return -ENODEV;
+ }
return 0;
-failed:
- hid_destroy_device(hid);
- session->hid = NULL;
-
fault:
kfree(session->rd_data);
session->rd_data = NULL;
@@ -842,168 +788,558 @@
return err;
}
-int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
+/* initialize session devices */
+static int hidp_session_dev_init(struct hidp_session *session,
+ struct hidp_connadd_req *req)
{
- struct hidp_session *session, *s;
- int err;
-
- BT_DBG("");
-
- if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) ||
- bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
- return -ENOTUNIQ;
-
- session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
- if (!session)
- return -ENOMEM;
-
- BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
-
- down_write(&hidp_session_sem);
-
- s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
- if (s && s->state == BT_CONNECTED) {
- err = -EEXIST;
- goto failed;
- }
-
- bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
-
- session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu);
- session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu);
-
- BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
-
- session->ctrl_sock = ctrl_sock;
- session->intr_sock = intr_sock;
- session->state = BT_CONNECTED;
-
- session->conn = hidp_get_connection(session);
- if (!session->conn) {
- err = -ENOTCONN;
- goto failed;
- }
-
- setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
-
- skb_queue_head_init(&session->ctrl_transmit);
- skb_queue_head_init(&session->intr_transmit);
-
- session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
- session->idle_to = req->idle_to;
-
- __hidp_link_session(session);
+ int ret;
if (req->rd_size > 0) {
- err = hidp_setup_hid(session, req);
- if (err && err != -ENODEV)
- goto purge;
+ ret = hidp_setup_hid(session, req);
+ if (ret && ret != -ENODEV)
+ return ret;
}
if (!session->hid) {
- err = hidp_setup_input(session, req);
- if (err < 0)
- goto purge;
+ ret = hidp_setup_input(session, req);
+ if (ret < 0)
+ return ret;
}
- hidp_set_timer(session);
-
- err = kernel_thread(hidp_session, session, CLONE_KERNEL);
- if (err < 0)
- goto unlink;
-
- if (session->input) {
- hidp_send_ctrl_message(session,
- HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT, NULL, 0);
- session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE);
-
- session->leds = 0xff;
- hidp_input_event(session->input, EV_LED, 0, 0);
- }
-
- up_write(&hidp_session_sem);
return 0;
+}
-unlink:
- hidp_del_timer(session);
-
- if (session->input) {
- input_unregister_device(session->input);
- session->input = NULL;
- }
-
- if (session->hid) {
- hid_destroy_device(session->hid);
- session->hid = NULL;
- }
+/* destroy session devices */
+static void hidp_session_dev_destroy(struct hidp_session *session)
+{
+ if (session->hid)
+ put_device(&session->hid->dev);
+ else if (session->input)
+ input_put_device(session->input);
kfree(session->rd_data);
session->rd_data = NULL;
-
-purge:
- __hidp_unlink_session(session);
-
- skb_queue_purge(&session->ctrl_transmit);
- skb_queue_purge(&session->intr_transmit);
-
-failed:
- up_write(&hidp_session_sem);
-
- input_free_device(session->input);
- kfree(session);
- return err;
}
-int hidp_del_connection(struct hidp_conndel_req *req)
+/* add HID/input devices to their underlying bus systems */
+static int hidp_session_dev_add(struct hidp_session *session)
+{
+ int ret;
+
+ /* Both HID and input systems drop a ref-count when unregistering the
+ * device but they don't take a ref-count when registering them. Work
+ * around this by explicitly taking a refcount during registration
+ * which is dropped automatically by unregistering the devices. */
+
+ if (session->hid) {
+ ret = hid_add_device(session->hid);
+ if (ret)
+ return ret;
+ get_device(&session->hid->dev);
+ } else if (session->input) {
+ ret = input_register_device(session->input);
+ if (ret)
+ return ret;
+ input_get_device(session->input);
+ }
+
+ return 0;
+}
+
+/* remove HID/input devices from their bus systems */
+static void hidp_session_dev_del(struct hidp_session *session)
+{
+ if (session->hid)
+ hid_destroy_device(session->hid);
+ else if (session->input)
+ input_unregister_device(session->input);
+}
+
+/*
+ * Create new session object
+ * Allocate session object, initialize static fields, copy input data into the
+ * object and take a reference to all sub-objects.
+ * This returns 0 on success and puts a pointer to the new session object in
+ * \out. Otherwise, an error code is returned.
+ * The new session object has an initial ref-count of 1.
+ */
+static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
+ struct socket *ctrl_sock,
+ struct socket *intr_sock,
+ struct hidp_connadd_req *req,
+ struct l2cap_conn *conn)
{
struct hidp_session *session;
- int err = 0;
+ int ret;
+ struct bt_sock *ctrl, *intr;
- BT_DBG("");
+ ctrl = bt_sk(ctrl_sock->sk);
+ intr = bt_sk(intr_sock->sk);
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+
+ /* object and runtime management */
+ kref_init(&session->ref);
+ atomic_set(&session->state, HIDP_SESSION_IDLING);
+ init_waitqueue_head(&session->state_queue);
+ session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
+
+ /* connection management */
+ bacpy(&session->bdaddr, bdaddr);
+ session->conn = conn;
+ session->user.probe = hidp_session_probe;
+ session->user.remove = hidp_session_remove;
+ session->ctrl_sock = ctrl_sock;
+ session->intr_sock = intr_sock;
+ skb_queue_head_init(&session->ctrl_transmit);
+ skb_queue_head_init(&session->intr_transmit);
+ session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl)->chan->omtu,
+ l2cap_pi(ctrl)->chan->imtu);
+ session->intr_mtu = min_t(uint, l2cap_pi(intr)->chan->omtu,
+ l2cap_pi(intr)->chan->imtu);
+ session->idle_to = req->idle_to;
+
+ /* device management */
+ setup_timer(&session->timer, hidp_idle_timeout,
+ (unsigned long)session);
+
+ /* session data */
+ mutex_init(&session->report_mutex);
+ init_waitqueue_head(&session->report_queue);
+
+ ret = hidp_session_dev_init(session, req);
+ if (ret)
+ goto err_free;
+
+ l2cap_conn_get(session->conn);
+ get_file(session->intr_sock->file);
+ get_file(session->ctrl_sock->file);
+ *out = session;
+ return 0;
+
+err_free:
+ kfree(session);
+ return ret;
+}
+
+/* increase ref-count of the given session by one */
+static void hidp_session_get(struct hidp_session *session)
+{
+ kref_get(&session->ref);
+}
+
+/* release callback */
+static void session_free(struct kref *ref)
+{
+ struct hidp_session *session = container_of(ref, struct hidp_session,
+ ref);
+
+ hidp_session_dev_destroy(session);
+ skb_queue_purge(&session->ctrl_transmit);
+ skb_queue_purge(&session->intr_transmit);
+ fput(session->intr_sock->file);
+ fput(session->ctrl_sock->file);
+ l2cap_conn_put(session->conn);
+ kfree(session);
+}
+
+/* decrease ref-count of the given session by one */
+static void hidp_session_put(struct hidp_session *session)
+{
+ kref_put(&session->ref, session_free);
+}
+
+/*
+ * Search the list of active sessions for a session with target address
+ * \bdaddr. You must hold at least a read-lock on \hidp_session_sem. As long as
+ * you do not release this lock, the session objects cannot vanish and you can
+ * safely take a reference to the session yourself.
+ */
+static struct hidp_session *__hidp_session_find(const bdaddr_t *bdaddr)
+{
+ struct hidp_session *session;
+
+ list_for_each_entry(session, &hidp_session_list, list) {
+ if (!bacmp(bdaddr, &session->bdaddr))
+ return session;
+ }
+
+ return NULL;
+}
+
+/*
+ * Same as __hidp_session_find() but no locks must be held. This also takes a
+ * reference of the returned session (if non-NULL) so you must drop this
+ * reference if you no longer use the object.
+ */
+static struct hidp_session *hidp_session_find(const bdaddr_t *bdaddr)
+{
+ struct hidp_session *session;
down_read(&hidp_session_sem);
- session = __hidp_get_session(&req->bdaddr);
- if (session) {
- if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) {
- hidp_send_ctrl_message(session,
- HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0);
- } else {
- /* Flush the transmit queues */
- skb_queue_purge(&session->ctrl_transmit);
- skb_queue_purge(&session->intr_transmit);
-
- /* Wakeup user-space polling for socket errors */
- session->intr_sock->sk->sk_err = EUNATCH;
- session->ctrl_sock->sk->sk_err = EUNATCH;
-
- /* Kill session thread */
- atomic_inc(&session->terminate);
- hidp_schedule(session);
- }
- } else
- err = -ENOENT;
+ session = __hidp_session_find(bdaddr);
+ if (session)
+ hidp_session_get(session);
up_read(&hidp_session_sem);
- return err;
+
+ return session;
+}
+
+/*
+ * Start session synchronously
+ * This starts a session thread and waits until initialization
+ * is done or returns an error if it couldn't be started.
+ * If this returns 0 the session thread is up and running. You must call
+ * hidp_session_stop_sync() before deleting any runtime resources.
+ */
+static int hidp_session_start_sync(struct hidp_session *session)
+{
+ unsigned int vendor, product;
+
+ if (session->hid) {
+ vendor = session->hid->vendor;
+ product = session->hid->product;
+ } else if (session->input) {
+ vendor = session->input->id.vendor;
+ product = session->input->id.product;
+ } else {
+ vendor = 0x0000;
+ product = 0x0000;
+ }
+
+ session->task = kthread_run(hidp_session_thread, session,
+ "khidpd_%04x%04x", vendor, product);
+ if (IS_ERR(session->task))
+ return PTR_ERR(session->task);
+
+ while (atomic_read(&session->state) <= HIDP_SESSION_IDLING)
+ wait_event(session->state_queue,
+ atomic_read(&session->state) > HIDP_SESSION_IDLING);
+
+ return 0;
+}
+
+/*
+ * Terminate session thread
+ * Wake up session thread and notify it to stop. This is asynchronous and
+ * returns immediately. Call this whenever a runtime error occurs and you want
+ * the session to stop.
+ * Note: wake_up_process() performs any necessary memory-barriers for us.
+ */
+static void hidp_session_terminate(struct hidp_session *session)
+{
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
+}
+
+/*
+ * Probe HIDP session
+ * This is called from the l2cap_conn core when our l2cap_user object is bound
+ * to the hci-connection. We get the session via the \user object and can now
+ * start the session thread, register the HID/input devices and link it into
+ * the global session list.
+ * The global session-list owns its own reference to the session object so you
+ * can drop your own reference after registering the l2cap_user object.
+ */
+static int hidp_session_probe(struct l2cap_conn *conn,
+ struct l2cap_user *user)
+{
+ struct hidp_session *session = container_of(user,
+ struct hidp_session,
+ user);
+ struct hidp_session *s;
+ int ret;
+
+ down_write(&hidp_session_sem);
+
+ /* check that no other session for this device exists */
+ s = __hidp_session_find(&session->bdaddr);
+ if (s) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ ret = hidp_session_start_sync(session);
+ if (ret)
+ goto out_unlock;
+
+ ret = hidp_session_dev_add(session);
+ if (ret)
+ goto out_stop;
+
+ hidp_session_get(session);
+ list_add(&session->list, &hidp_session_list);
+ ret = 0;
+ goto out_unlock;
+
+out_stop:
+ hidp_session_terminate(session);
+out_unlock:
+ up_write(&hidp_session_sem);
+ return ret;
+}
+
+/*
+ * Remove HIDP session
+ * Called from the l2cap_conn core when either we explicitly unregistered
+ * the l2cap_user object or if the underlying connection is shut down.
+ * We signal the hidp-session thread to shut down, unregister the HID/input
+ * devices and unlink the session from the global list.
+ * This drops the reference to the session that is owned by the global
+ * session-list.
+ * Note: We _must_ not synchronously wait for the session-thread to shut down.
+ * This is because the session-thread might be waiting for an HCI lock that is
+ * held while we are called. Therefore, we only unregister the devices and
+ * notify the session-thread to terminate. The thread itself owns a reference
+ * to the session object so it can safely shut down.
+ */
+static void hidp_session_remove(struct l2cap_conn *conn,
+ struct l2cap_user *user)
+{
+ struct hidp_session *session = container_of(user,
+ struct hidp_session,
+ user);
+
+ down_write(&hidp_session_sem);
+
+ hidp_session_terminate(session);
+ hidp_session_dev_del(session);
+ list_del(&session->list);
+
+ up_write(&hidp_session_sem);
+
+ hidp_session_put(session);
+}
+
+/*
+ * Session Worker
+ * This performs the actual main-loop of the HIDP worker. We first check
+ * whether the underlying connection is still alive, then parse all pending
+ * messages and finally send all outstanding messages.
+ */
+static void hidp_session_run(struct hidp_session *session)
+{
+ struct sock *ctrl_sk = session->ctrl_sock->sk;
+ struct sock *intr_sk = session->intr_sock->sk;
+ struct sk_buff *skb;
+
+ for (;;) {
+ /*
+ * This thread can be woken up two ways:
+ * - You call hidp_session_terminate() which sets the
+ * session->terminate flag and wakes this thread up.
+ * - Via modifying the socket state of ctrl/intr_sock. This
+ * thread is woken up by ->sk_state_changed().
+ *
+ * Note: set_current_state() performs any necessary
+ * memory-barriers for us.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (atomic_read(&session->terminate))
+ break;
+
+ if (ctrl_sk->sk_state != BT_CONNECTED ||
+ intr_sk->sk_state != BT_CONNECTED)
+ break;
+
+ /* parse incoming intr-skbs */
+ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+ skb_orphan(skb);
+ if (!skb_linearize(skb))
+ hidp_recv_intr_frame(session, skb);
+ else
+ kfree_skb(skb);
+ }
+
+ /* send pending intr-skbs */
+ hidp_process_transmit(session, &session->intr_transmit,
+ session->intr_sock);
+
+ /* parse incoming ctrl-skbs */
+ while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+ skb_orphan(skb);
+ if (!skb_linearize(skb))
+ hidp_recv_ctrl_frame(session, skb);
+ else
+ kfree_skb(skb);
+ }
+
+ /* send pending ctrl-skbs */
+ hidp_process_transmit(session, &session->ctrl_transmit,
+ session->ctrl_sock);
+
+ schedule();
+ }
+
+ atomic_inc(&session->terminate);
+ set_current_state(TASK_RUNNING);
+}
+
+/*
+ * HIDP session thread
+ * This thread runs the I/O for a single HIDP session. Startup is synchronous
+ * which allows us to take references to ourself here instead of doing that in
+ * the caller.
+ * When we are ready to run we notify the caller and call hidp_session_run().
+ */
+static int hidp_session_thread(void *arg)
+{
+ struct hidp_session *session = arg;
+ wait_queue_t ctrl_wait, intr_wait;
+
+ BT_DBG("session %p", session);
+
+ /* initialize runtime environment */
+ hidp_session_get(session);
+ __module_get(THIS_MODULE);
+ set_user_nice(current, -15);
+ hidp_set_timer(session);
+
+ init_waitqueue_entry(&ctrl_wait, current);
+ init_waitqueue_entry(&intr_wait, current);
+ add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+ add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+ /* This memory barrier is paired with wq_has_sleeper(). See
+ * sock_poll_wait() for more information why this is needed. */
+ smp_mb();
+
+ /* notify synchronous startup that we're ready */
+ atomic_inc(&session->state);
+ wake_up(&session->state_queue);
+
+ /* run session */
+ hidp_session_run(session);
+
+ /* cleanup runtime environment */
+ remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+ remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+ wake_up_interruptible(&session->report_queue);
+ hidp_del_timer(session);
+
+ /*
+ * If we stopped ourself due to any internal signal, we should try to
+ * unregister our own session here to avoid having it linger until the
+ * parent l2cap_conn dies or user-space cleans it up.
+ * This does not deadlock as we don't do any synchronous shutdown.
+ * Instead, this call has the same semantics as if user-space tried to
+ * delete the session.
+ */
+ l2cap_unregister_user(session->conn, &session->user);
+ hidp_session_put(session);
+
+ module_put_and_exit(0);
+ return 0;
+}
+
+static int hidp_verify_sockets(struct socket *ctrl_sock,
+ struct socket *intr_sock)
+{
+ struct bt_sock *ctrl, *intr;
+ struct hidp_session *session;
+
+ if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
+ return -EINVAL;
+
+ ctrl = bt_sk(ctrl_sock->sk);
+ intr = bt_sk(intr_sock->sk);
+
+ if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst))
+ return -ENOTUNIQ;
+ if (ctrl->sk.sk_state != BT_CONNECTED ||
+ intr->sk.sk_state != BT_CONNECTED)
+ return -EBADFD;
+
+ /* early session check, we check again during session registration */
+ session = hidp_session_find(&ctrl->dst);
+ if (session) {
+ hidp_session_put(session);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int hidp_connection_add(struct hidp_connadd_req *req,
+ struct socket *ctrl_sock,
+ struct socket *intr_sock)
+{
+ struct hidp_session *session;
+ struct l2cap_conn *conn;
+ struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+ int ret;
+
+ ret = hidp_verify_sockets(ctrl_sock, intr_sock);
+ if (ret)
+ return ret;
+
+ conn = NULL;
+ l2cap_chan_lock(chan);
+ if (chan->conn) {
+ l2cap_conn_get(chan->conn);
+ conn = chan->conn;
+ }
+ l2cap_chan_unlock(chan);
+
+ if (!conn)
+ return -EBADFD;
+
+ ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock,
+ intr_sock, req, conn);
+ if (ret)
+ goto out_conn;
+
+ ret = l2cap_register_user(conn, &session->user);
+ if (ret)
+ goto out_session;
+
+ ret = 0;
+
+out_session:
+ hidp_session_put(session);
+out_conn:
+ l2cap_conn_put(conn);
+ return ret;
+}
+
+int hidp_connection_del(struct hidp_conndel_req *req)
+{
+ struct hidp_session *session;
+
+ session = hidp_session_find(&req->bdaddr);
+ if (!session)
+ return -ENOENT;
+
+ if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
+ hidp_send_ctrl_message(session,
+ HIDP_TRANS_HID_CONTROL |
+ HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
+ NULL, 0);
+ else
+ l2cap_unregister_user(session->conn, &session->user);
+
+ hidp_session_put(session);
+
+ return 0;
}
int hidp_get_connlist(struct hidp_connlist_req *req)
{
- struct list_head *p;
+ struct hidp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&hidp_session_sem);
- list_for_each(p, &hidp_session_list) {
- struct hidp_session *session;
+ list_for_each_entry(session, &hidp_session_list, list) {
struct hidp_conninfo ci;
- session = list_entry(p, struct hidp_session, list);
-
- __hidp_copy_session(session, &ci);
+ hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
err = -EFAULT;
@@ -1024,18 +1360,14 @@
int hidp_get_conninfo(struct hidp_conninfo *ci)
{
struct hidp_session *session;
- int err = 0;
- down_read(&hidp_session_sem);
+ session = hidp_session_find(&ci->bdaddr);
+ if (session) {
+ hidp_copy_session(session, ci);
+ hidp_session_put(session);
+ }
- session = __hidp_get_session(&ci->bdaddr);
- if (session)
- __hidp_copy_session(session, ci);
- else
- err = -ENOENT;
-
- up_read(&hidp_session_sem);
- return err;
+ return session ? 0 : -ENOENT;
}
static int __init hidp_init(void)
@@ -1054,6 +1386,7 @@
module_exit(hidp_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
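The reworked core.c ties each HIDP session to its l2cap_conn through an l2cap_user object: l2cap_register_user() invokes ->probe while the connection is still alive, and ->remove fires when the user is unregistered or the connection goes away. The following is a minimal sketch of that registration flow, assuming only the l2cap_register_user()/l2cap_unregister_user() behaviour visible in the hunks above; the example_* callbacks are placeholders:

#include <net/bluetooth/l2cap.h>

static int example_probe(struct l2cap_conn *conn, struct l2cap_user *user)
{
        /* Runs with the connection alive: start the worker, add devices.
         * Returning an error aborts the registration. */
        return 0;
}

static void example_remove(struct l2cap_conn *conn, struct l2cap_user *user)
{
        /* Asynchronous teardown only; must not wait for the worker here,
         * mirroring the hidp_session_remove() comment above. */
}

static int example_bind(struct l2cap_conn *conn, struct l2cap_user *user)
{
        user->probe  = example_probe;
        user->remove = example_remove;
        return l2cap_register_user(conn, user);  /* calls ->probe on success */
}

This is why hidp_connection_add() no longer starts the thread itself: the thread is started from ->probe, so its lifetime can never exceed the lifetime of the underlying L2CAP connection.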
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 28bb9ce..6162ce8 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,7 +24,9 @@
#define __HIDP_H
#include <linux/types.h>
+#include <linux/kref.h>
#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/l2cap.h>
/* HIDP header masks */
#define HIDP_HEADER_TRANS_MASK 0xf0
@@ -80,6 +82,8 @@
#define HIDP_VIRTUAL_CABLE_UNPLUG 0
#define HIDP_BOOT_PROTOCOL_MODE 1
#define HIDP_BLUETOOTH_VENDOR_ID 9
+#define HIDP_WAITING_FOR_RETURN 10
+#define HIDP_WAITING_FOR_SEND_ACK 11
struct hidp_connadd_req {
int ctrl_sock; /* Connected control socket */
@@ -117,57 +121,64 @@
struct hidp_conninfo __user *ci;
};
-int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
-int hidp_del_connection(struct hidp_conndel_req *req);
+int hidp_connection_add(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
+int hidp_connection_del(struct hidp_conndel_req *req);
int hidp_get_connlist(struct hidp_connlist_req *req);
int hidp_get_conninfo(struct hidp_conninfo *ci);
+enum hidp_session_state {
+ HIDP_SESSION_IDLING,
+ HIDP_SESSION_RUNNING,
+};
+
/* HIDP session defines */
struct hidp_session {
struct list_head list;
+ struct kref ref;
- struct hci_conn *conn;
+ /* runtime management */
+ atomic_t state;
+ wait_queue_head_t state_queue;
+ atomic_t terminate;
+ struct task_struct *task;
+ unsigned long flags;
+ /* connection management */
+ bdaddr_t bdaddr;
+ struct l2cap_conn *conn;
+ struct l2cap_user user;
struct socket *ctrl_sock;
struct socket *intr_sock;
-
- bdaddr_t bdaddr;
-
- unsigned long state;
- unsigned long flags;
- unsigned long idle_to;
-
- uint ctrl_mtu;
- uint intr_mtu;
-
- atomic_t terminate;
-
- unsigned char keys[8];
- unsigned char leds;
-
- struct input_dev *input;
-
- struct hid_device *hid;
-
- struct timer_list timer;
-
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
+ uint ctrl_mtu;
+ uint intr_mtu;
+ unsigned long idle_to;
+
+ /* device management */
+ struct input_dev *input;
+ struct hid_device *hid;
+ struct timer_list timer;
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
+
+ /* session data */
+ unsigned char keys[8];
+ unsigned char leds;
+
+ /* Used in hidp_get_raw_report() */
+ int waiting_report_type; /* HIDP_DATA_RTYPE_* */
+ int waiting_report_number; /* -1 for not numbered */
+ struct mutex report_mutex;
+ struct sk_buff *report_return;
+ wait_queue_head_t report_queue;
+
+ /* Used in hidp_output_raw_report() */
+ int output_report_success; /* boolean */
};
-static inline void hidp_schedule(struct hidp_session *session)
-{
- struct sock *ctrl_sk = session->ctrl_sock->sk;
- struct sock *intr_sk = session->intr_sock->sk;
-
- wake_up_interruptible(sk_sleep(ctrl_sk));
- wake_up_interruptible(sk_sleep(intr_sk));
-}
-
/* HIDP init defines */
extern int __init hidp_init_sockets(void);
extern void __exit hidp_cleanup_sockets(void);
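struct hidp_session is now reference counted through the embedded kref; hidp_session_get()/hidp_session_put() in core.c are thin wrappers around it. A generic kref sketch of the ownership rule (not HIDP-specific, names are placeholders):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
        struct kref ref;
        /* ... payload ... */
};

static void example_release(struct kref *ref)
{
        kfree(container_of(ref, struct example_obj, ref));
}

/* kref_init() sets the count to 1 for the creator; every additional owner
 * (global session list, worker thread, ioctl caller via hidp_session_find())
 * takes kref_get() and later drops its reference with
 * kref_put(&obj->ref, example_release), which frees the object on the last put. */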
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 178ac7f..cb3fdde 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,25 +20,15 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
#include "hidp.h"
+static struct bt_sock_list hidp_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
+};
+
static int hidp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -48,6 +38,8 @@
if (!sk)
return 0;
+ bt_sock_unlink(&hidp_sk_list, sk);
+
sock_orphan(sk);
sock_put(sk);
@@ -70,7 +62,7 @@
switch (cmd) {
case HIDPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -85,32 +77,23 @@
return err;
}
- if (csock->sk->sk_state != BT_CONNECTED ||
- isock->sk->sk_state != BT_CONNECTED) {
- sockfd_put(csock);
- sockfd_put(isock);
- return -EBADFD;
- }
+ err = hidp_connection_add(&ca, csock, isock);
+ if (!err && copy_to_user(argp, &ca, sizeof(ca)))
+ err = -EFAULT;
- err = hidp_add_connection(&ca, csock, isock);
- if (!err) {
- if (copy_to_user(argp, &ca, sizeof(ca)))
- err = -EFAULT;
- } else {
- sockfd_put(csock);
- sockfd_put(isock);
- }
+ sockfd_put(csock);
+ sockfd_put(isock);
return err;
case HIDPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
- return hidp_del_connection(&cd);
+ return hidp_connection_del(&cd);
case HIDPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
@@ -160,10 +143,10 @@
{
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -174,7 +157,7 @@
err = hidp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
@@ -267,6 +250,8 @@
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
+ bt_sock_link(&hidp_sk_list, sk);
+
return 0;
}
@@ -285,21 +270,30 @@
return err;
err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("Can't register HIDP socket");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create HIDP proc file");
+ bt_sock_unregister(BTPROTO_HIDP);
+ goto error;
+ }
+
+ BT_INFO("HIDP socket layer initialized");
return 0;
error:
- BT_ERR("Can't register HIDP socket");
proto_unregister(&hidp_proto);
return err;
}
void __exit hidp_cleanup_sockets(void)
{
- if (bt_sock_unregister(BTPROTO_HIDP) < 0)
- BT_ERR("Can't unregister HIDP socket");
-
+ bt_procfs_cleanup(&init_net, "hidp");
+ bt_sock_unregister(BTPROTO_HIDP);
proto_unregister(&hidp_proto);
}
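The sock.c changes link HIDP sockets into a bt_sock_list and create a /proc/net entry, so hidp_init_sockets() now has three steps that must be unwound in reverse order on failure. A condensed restatement of that ordering, using only the calls already present in the hunk above:

static int __init example_init_sockets(void)
{
        int err;

        err = proto_register(&hidp_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
        if (err < 0)
                goto unregister_proto;

        err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL);
        if (err < 0)
                goto unregister_sock;

        return 0;

unregister_sock:
        bt_sock_unregister(BTPROTO_HIDP);
unregister_proto:
        proto_unregister(&hidp_proto);
        return err;
}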
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index be8973b..79a680a 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1,8 +1,10 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010-2013 The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -28,155 +30,240 @@
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
#include <linux/crc16.h>
-#include <linux/math64.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>
bool disable_ertm;
-bool enable_hs;
-bool enable_reconfig;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fc_mask = L2CAP_FC_L2CAP;
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
-struct workqueue_struct *_l2cap_wq;
-
-struct bt_sock_list l2cap_sk_list = {
- .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
-};
-
-static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
- struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
-static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
- struct l2cap_pinfo *pi, u16 icid, u16 result);
-static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid, u16 result);
-
-static void l2cap_amp_move_setup(struct sock *sk);
-static void l2cap_amp_move_success(struct sock *sk);
-static void l2cap_amp_move_revert(struct sock *sk);
-
-static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
+static LIST_HEAD(chan_list);
+static DEFINE_RWLOCK(chan_list_lock);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data);
-static int l2cap_answer_move_poll(struct sock *sk);
-static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
-static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
-static void l2cap_chan_ready(struct sock *sk);
-static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l);
-static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to);
-static void l2cap_queue_acl_data(struct work_struct *worker);
-static void l2cap_queue_smp_data(struct work_struct *worker);
-static struct att_channel_parameters{
- struct sk_buff *skb;
- struct l2cap_conn *conn;
- __le16 cid;
- int dir;
-} att_chn_params;
-static struct smp_channel_params{
- struct sk_buff *skb;
- struct l2cap_conn *conn;
- __le16 cid;
-} smp_chn_params;
+ u8 code, u8 ident, u16 dlen, void *data);
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event);
/* ---- L2CAP channels ---- */
-static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
+
+static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ u16 cid)
{
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->dcid == cid)
- break;
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->dcid == cid)
+ return c;
}
- return s;
+ return NULL;
}
-/* Find channel with given DCID.
- * Returns locked socket */
-static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
- u16 cid)
+static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ u16 cid)
{
- struct sock *s;
- read_lock(&l->lock);
- s = __l2cap_get_chan_by_dcid(l, cid);
- if (s)
- bh_lock_sock(s);
- read_unlock(&l->lock);
- return s;
-}
+ struct l2cap_chan *c;
-static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
-{
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->scid == cid)
- break;
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->scid == cid)
+ return c;
}
- return s;
+ return NULL;
}
/* Find channel with given SCID.
- * Returns locked socket */
-static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+ * Returns locked channel. */
+static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ u16 cid)
{
- struct sock *s;
- read_lock(&l->lock);
- s = __l2cap_get_chan_by_scid(l, cid);
- if (s)
- bh_lock_sock(s);
- read_unlock(&l->lock);
- return s;
+ struct l2cap_chan *c;
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_scid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
}
-static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+/* Find channel with given DCID.
+ * Returns locked channel.
+ */
+static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ u16 cid)
{
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->ident == ident)
- break;
+ struct l2cap_chan *c;
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_dcid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+}
+
+static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+ u8 ident)
+{
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->ident == ident)
+ return c;
}
- return s;
+ return NULL;
}
-static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+ u8 ident)
{
- struct sock *s;
- read_lock(&l->lock);
- s = __l2cap_get_chan_by_ident(l, ident);
- if (s)
- bh_lock_sock(s);
- read_unlock(&l->lock);
- return s;
+ struct l2cap_chan *c;
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_ident(conn, ident);
+ if (c)
+ l2cap_chan_lock(c);
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
}
-static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
- u16 seq)
+static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
+{
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &chan_list, global_l) {
+ if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
+ return c;
+ }
+ return NULL;
+}
+
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
+{
+ int err;
+
+ write_lock(&chan_list_lock);
+
+ if (psm && __l2cap_global_chan_by_addr(psm, src)) {
+ err = -EADDRINUSE;
+ goto done;
+ }
+
+ if (psm) {
+ chan->psm = psm;
+ chan->sport = psm;
+ err = 0;
+ } else {
+ u16 p;
+
+ err = -EINVAL;
+ for (p = 0x1001; p < 0x1100; p += 2)
+ if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
+ chan->psm = cpu_to_le16(p);
+ chan->sport = cpu_to_le16(p);
+ err = 0;
+ break;
+ }
+ }
+
+done:
+ write_unlock(&chan_list_lock);
+ return err;
+}
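
/* Illustrative sketch, not part of the patch: the dynamic PSM scan from
 * l2cap_add_psm() above, modelled in isolation. The kernel auto-allocates
 * odd PSM values from the 0x1001-0x10FF range and takes the first one not
 * already bound on the same source address; demo_psm_in_use() and the other
 * demo_* names are invented stand-ins for __l2cap_global_chan_by_addr().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_psm_in_use(uint16_t psm)
{
	return psm == 0x1001;	/* pretend the first dynamic PSM is already bound */
}

static int demo_pick_dynamic_psm(uint16_t *out)
{
	uint16_t p;

	for (p = 0x1001; p < 0x1100; p += 2) {	/* odd PSMs only */
		if (!demo_psm_in_use(p)) {
			*out = p;
			return 0;
		}
	}

	return -1;	/* every dynamic PSM in the range is taken */
}

int main(void)
{
	uint16_t psm;

	if (!demo_pick_dynamic_psm(&psm))
		printf("picked PSM 0x%04x\n", (unsigned int)psm);

	return 0;
}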
+
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
+{
+ write_lock(&chan_list_lock);
+
+ chan->scid = scid;
+
+ write_unlock(&chan_list_lock);
+
+ return 0;
+}
+
+static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
+{
+ u16 cid = L2CAP_CID_DYN_START;
+
+ for (; cid < L2CAP_CID_DYN_END; cid++) {
+ if (!__l2cap_get_chan_by_scid(conn, cid))
+ return cid;
+ }
+
+ return 0;
+}
+
+static void __l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+ BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
+ state_to_string(state));
+
+ chan->state = state;
+ chan->ops->state_change(chan, state);
+}
+
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+ struct sock *sk = chan->sk;
+
+ lock_sock(sk);
+ __l2cap_state_change(chan, state);
+ release_sock(sk);
+}
+
+static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->sk;
+
+ sk->sk_err = err;
+}
+
+static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->sk;
+
+ lock_sock(sk);
+ __l2cap_chan_set_err(chan, err);
+ release_sock(sk);
+}
+
+static void __set_retrans_timer(struct l2cap_chan *chan)
+{
+ if (!delayed_work_pending(&chan->monitor_timer) &&
+ chan->retrans_timeout) {
+ l2cap_set_timer(chan, &chan->retrans_timer,
+ msecs_to_jiffies(chan->retrans_timeout));
+ }
+}
+
+static void __set_monitor_timer(struct l2cap_chan *chan)
+{
+ __clear_retrans_timer(chan);
+ if (chan->monitor_timeout) {
+ l2cap_set_timer(chan, &chan->monitor_timer,
+ msecs_to_jiffies(chan->monitor_timeout));
+ }
+}
+
+static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+ u16 seq)
{
struct sk_buff *skb;
@@ -188,30 +275,38 @@
return NULL;
}
+/* ---- L2CAP sequence number lists ---- */
+
+/* For ERTM, ordered lists of sequence numbers must be tracked for
+ * SREJ requests that are received and for frames that are to be
+ * retransmitted. These seq_list functions implement a singly-linked
+ * list in an array, where membership in the list can also be checked
+ * in constant time. Items can also be added to the tail of the list
+ * and removed from the head in constant time, without further memory
+ * allocs or frees.
+ */
+
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
- u16 allocSize = 1;
- int err = 0;
- int i;
+ size_t alloc_size, i;
- /* Actual allocated size must be a power of 2 */
- while (allocSize && allocSize <= size)
- allocSize <<= 1;
- if (!allocSize)
- return -ENOMEM;
+ /* Allocated size is a power of 2 to map sequence numbers
+ * (which may be up to 14 bits) into a smaller array that is
+ * sized for the negotiated ERTM transmit windows.
+ */
+ alloc_size = roundup_pow_of_two(size);
- seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
+ seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
if (!seq_list->list)
return -ENOMEM;
- seq_list->size = allocSize;
- seq_list->mask = allocSize - 1;
+ seq_list->mask = alloc_size - 1;
seq_list->head = L2CAP_SEQ_LIST_CLEAR;
seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
- for (i = 0; i < allocSize; i++)
+ for (i = 0; i < alloc_size; i++)
seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
- return err;
+ return 0;
}
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
@@ -220,8 +315,9 @@
}
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
- u16 seq)
+ u16 seq)
{
+ /* Constant-time check for list membership */
return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
@@ -229,15 +325,11 @@
{
u16 mask = seq_list->mask;
- BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
-
if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
/* In case someone tries to pop the head of an empty list */
- BT_DBG("List empty");
return L2CAP_SEQ_LIST_CLEAR;
} else if (seq_list->head == seq) {
- /* Head can be removed quickly */
- BT_DBG("Remove head");
+ /* Head can be removed in constant time */
seq_list->head = seq_list->list[seq & mask];
seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
@@ -246,17 +338,15 @@
seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
} else {
- /* Non-head item must be found first */
+ /* Walk the list to find the sequence number */
u16 prev = seq_list->head;
- BT_DBG("Find and remove");
while (seq_list->list[prev & mask] != seq) {
prev = seq_list->list[prev & mask];
- if (prev == L2CAP_SEQ_LIST_TAIL) {
- BT_DBG("seq %d not in list", (int) seq);
+ if (prev == L2CAP_SEQ_LIST_TAIL)
return L2CAP_SEQ_LIST_CLEAR;
- }
}
+ /* Unlink the number from the list and clear it */
seq_list->list[prev & mask] = seq_list->list[seq & mask];
seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
if (seq_list->tail == seq)
@@ -267,355 +357,323 @@
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
+ /* Remove the head in constant time */
return l2cap_seq_list_remove(seq_list, seq_list->head);
}
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
- if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
- u16 i;
- for (i = 0; i < seq_list->size; i++)
- seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+ u16 i;
- seq_list->head = L2CAP_SEQ_LIST_CLEAR;
- seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
- }
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ for (i = 0; i <= seq_list->mask; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
u16 mask = seq_list->mask;
- BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
+ /* All appends happen in constant time */
- if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
- if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
- seq_list->head = seq;
- else
- seq_list->list[seq_list->tail & mask] = seq;
+ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
+ return;
- seq_list->tail = seq;
- seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
- }
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
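
/* Illustrative sketch, not part of the patch: a tiny user-space model of the
 * array-backed ERTM sequence list described above. The demo_* names and the
 * sentinel values are invented for this example; only the technique (a
 * power-of-two sized array, O(1) membership test and O(1) tail append with
 * head/tail sentinels) mirrors the l2cap_seq_list_* helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CLEAR	0xffff
#define DEMO_TAIL	0xfffe
#define DEMO_SIZE	8		/* must be a power of two */

struct demo_seq_list {
	uint16_t head, tail, mask;
	uint16_t list[DEMO_SIZE];
};

static void demo_init(struct demo_seq_list *l)
{
	int i;

	l->mask = DEMO_SIZE - 1;
	l->head = l->tail = DEMO_CLEAR;
	for (i = 0; i < DEMO_SIZE; i++)
		l->list[i] = DEMO_CLEAR;
}

/* O(1) membership: a slot holds DEMO_CLEAR only while its seq is not queued */
static int demo_contains(struct demo_seq_list *l, uint16_t seq)
{
	return l->list[seq & l->mask] != DEMO_CLEAR;
}

/* O(1) append to the tail, mirroring l2cap_seq_list_append() */
static void demo_append(struct demo_seq_list *l, uint16_t seq)
{
	if (demo_contains(l, seq))
		return;

	if (l->tail == DEMO_CLEAR)
		l->head = seq;
	else
		l->list[l->tail & l->mask] = seq;

	l->tail = seq;
	l->list[seq & l->mask] = DEMO_TAIL;
}

int main(void)
{
	struct demo_seq_list l;

	demo_init(&l);
	demo_append(&l, 3);
	demo_append(&l, 5);

	printf("head=%u contains(5)=%d\n", (unsigned int)l.head,
	       demo_contains(&l, 5));
	return 0;
}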
-static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
+static void l2cap_chan_timeout(struct work_struct *work)
{
- u16 packed;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ chan_timer.work);
+ struct l2cap_conn *conn = chan->conn;
+ int reason;
- packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
- L2CAP_CTRL_REQSEQ;
- packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
- L2CAP_CTRL_FINAL;
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
- if (control->frame_type == 's') {
- packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
- L2CAP_CTRL_POLL;
- packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
- L2CAP_CTRL_SUPERVISE;
- packed |= L2CAP_CTRL_FRAME_TYPE;
- } else {
- packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
- L2CAP_CTRL_SAR;
- packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
- L2CAP_CTRL_TXSEQ;
- }
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
- return packed;
+ if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+ reason = ECONNREFUSED;
+ else if (chan->state == BT_CONNECT &&
+ chan->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+
+ l2cap_chan_close(chan, reason);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ mutex_unlock(&conn->chan_lock);
+
+ l2cap_chan_put(chan);
}
-static void __get_enhanced_control(u16 enhanced,
- struct bt_l2cap_control *control)
+struct l2cap_chan *l2cap_chan_create(void)
{
- control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
- L2CAP_CTRL_REQSEQ_SHIFT;
- control->final = (enhanced & L2CAP_CTRL_FINAL) >>
- L2CAP_CTRL_FINAL_SHIFT;
+ struct l2cap_chan *chan;
- if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
- control->frame_type = 's';
- control->poll = (enhanced & L2CAP_CTRL_POLL) >>
- L2CAP_CTRL_POLL_SHIFT;
- control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
- L2CAP_CTRL_SUPERVISE_SHIFT;
+ chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
- control->sar = 0;
- control->txseq = 0;
- } else {
- control->frame_type = 'i';
- control->sar = (enhanced & L2CAP_CTRL_SAR) >>
- L2CAP_CTRL_SAR_SHIFT;
- control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
- L2CAP_CTRL_TXSEQ_SHIFT;
+ mutex_init(&chan->lock);
- control->poll = 0;
- control->super = 0;
- }
+ write_lock(&chan_list_lock);
+ list_add(&chan->global_l, &chan_list);
+ write_unlock(&chan_list_lock);
+
+ INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
+
+ chan->state = BT_OPEN;
+
+ kref_init(&chan->kref);
+
+ /* This flag is cleared in l2cap_chan_ready() */
+ set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+
+ BT_DBG("chan %p", chan);
+
+ return chan;
}
-static u32 __pack_extended_control(struct bt_l2cap_control *control)
+static void l2cap_chan_destroy(struct kref *kref)
{
- u32 packed;
+ struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
- packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
- L2CAP_EXT_CTRL_REQSEQ;
- packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
- L2CAP_EXT_CTRL_FINAL;
+ BT_DBG("chan %p", chan);
- if (control->frame_type == 's') {
- packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
- L2CAP_EXT_CTRL_POLL;
- packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
- L2CAP_EXT_CTRL_SUPERVISE;
- packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
- } else {
- packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
- L2CAP_EXT_CTRL_SAR;
- packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
- L2CAP_EXT_CTRL_TXSEQ;
- }
+ write_lock(&chan_list_lock);
+ list_del(&chan->global_l);
+ write_unlock(&chan_list_lock);
- return packed;
+ kfree(chan);
}
-static void __get_extended_control(u32 extended,
- struct bt_l2cap_control *control)
+void l2cap_chan_hold(struct l2cap_chan *c)
{
- control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
- L2CAP_EXT_CTRL_REQSEQ_SHIFT;
- control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
- L2CAP_EXT_CTRL_FINAL_SHIFT;
+ BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
- if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
- control->frame_type = 's';
- control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
- L2CAP_EXT_CTRL_POLL_SHIFT;
- control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
- L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
-
- control->sar = 0;
- control->txseq = 0;
- } else {
- control->frame_type = 'i';
- control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
- L2CAP_EXT_CTRL_SAR_SHIFT;
- control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
- L2CAP_EXT_CTRL_TXSEQ_SHIFT;
-
- control->poll = 0;
- control->super = 0;
- }
+ kref_get(&c->kref);
}
-static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
+void l2cap_chan_put(struct l2cap_chan *c)
{
- BT_DBG("pi %p", pi);
- __cancel_delayed_work(&pi->ack_work);
+ BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+ kref_put(&c->kref, l2cap_chan_destroy);
}
-static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
+void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
- BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
- if (!delayed_work_pending(&pi->ack_work)) {
- queue_delayed_work(_l2cap_wq, &pi->ack_work,
- msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
- }
+ chan->fcs = L2CAP_FCS_CRC16;
+ chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+ chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->sec_level = BT_SECURITY_LOW;
+
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
-static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
- BT_DBG("pi %p", pi);
- __cancel_delayed_work(&pi->retrans_work);
-}
-
-static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
-{
- BT_DBG("pi %p", pi);
- if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
- __cancel_delayed_work(&pi->retrans_work);
- queue_delayed_work(_l2cap_wq, &pi->retrans_work,
- msecs_to_jiffies(pi->retrans_timeout));
- }
-}
-
-static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
-{
- BT_DBG("pi %p", pi);
- __cancel_delayed_work(&pi->monitor_work);
-}
-
-static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
-{
- BT_DBG("pi %p", pi);
- l2cap_ertm_stop_retrans_timer(pi);
- __cancel_delayed_work(&pi->monitor_work);
- if (pi->monitor_timeout) {
- queue_delayed_work(_l2cap_wq, &pi->monitor_work,
- msecs_to_jiffies(pi->monitor_timeout));
- }
-}
-
-static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
-{
- u16 cid = L2CAP_CID_DYN_START;
-
- for (; cid < L2CAP_CID_DYN_END; cid++) {
- if (!__l2cap_get_chan_by_scid(l, cid))
- return cid;
- }
-
- return 0;
-}
-
-static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
-{
- sock_hold(sk);
-
- if (l->head)
- l2cap_pi(l->head)->prev_c = sk;
-
- l2cap_pi(sk)->next_c = l->head;
- l2cap_pi(sk)->prev_c = NULL;
- l->head = sk;
-}
-
-static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
-{
- struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
-
- write_lock_bh(&l->lock);
- if (sk == l->head)
- l->head = next;
-
- if (next)
- l2cap_pi(next)->prev_c = prev;
- if (prev)
- l2cap_pi(prev)->next_c = next;
- write_unlock_bh(&l->lock);
-
- __sock_put(sk);
-}
-
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
-
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
+ __le16_to_cpu(chan->psm), chan->dcid);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
- l2cap_pi(sk)->conn = conn;
+ chan->conn = conn;
- if (!l2cap_pi(sk)->fixed_channel &&
- (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_CONN_ORIENTED:
if (conn->hcon->type == LE_LINK) {
/* LE connection */
- if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
- l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
- if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
- l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
-
- l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
- l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ chan->scid = L2CAP_CID_LE_DATA;
+ chan->dcid = L2CAP_CID_LE_DATA;
} else {
/* Alloc CID for connection-oriented socket */
- l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
- l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+ chan->scid = l2cap_alloc_cid(conn);
+ chan->omtu = L2CAP_DEFAULT_MTU;
}
- } else if (sk->sk_type == SOCK_DGRAM) {
+ break;
+
+ case L2CAP_CHAN_CONN_LESS:
/* Connectionless socket */
- l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
- l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
- l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
- } else if (sk->sk_type == SOCK_RAW) {
+ chan->scid = L2CAP_CID_CONN_LESS;
+ chan->dcid = L2CAP_CID_CONN_LESS;
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ break;
+
+ case L2CAP_CHAN_CONN_FIX_A2MP:
+ chan->scid = L2CAP_CID_A2MP;
+ chan->dcid = L2CAP_CID_A2MP;
+ chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+ chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+ break;
+
+ default:
/* Raw socket can send/recv signalling messages only */
- l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
- l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
- l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+ chan->scid = L2CAP_CID_SIGNALING;
+ chan->dcid = L2CAP_CID_SIGNALING;
+ chan->omtu = L2CAP_DEFAULT_MTU;
}
- if (l2cap_get_smallest_flushto(l) > l2cap_pi(sk)->flush_to) {
- /*if flush timeout of the channel is lesser than existing */
- l2cap_set_acl_flushto(conn->hcon, l2cap_pi(sk)->flush_to);
- }
- /* Otherwise, do not set scid/dcid/omtu. These will be set up
- * by l2cap_fixed_channel_config()
- */
+ chan->local_id = L2CAP_BESTEFFORT_ID;
+ chan->local_stype = L2CAP_SERV_BESTEFFORT;
+ chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
+ chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
+ chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
+ chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
- __l2cap_chan_link(l, sk);
+ l2cap_chan_hold(chan);
+
+ list_add(&chan->list, &conn->chan_l);
}
-/* Delete channel.
- * Must be called on the locked socket. */
-void l2cap_chan_del(struct sock *sk, int err)
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sock *parent = bt_sk(sk)->parent;
+ mutex_lock(&conn->chan_lock);
+ __l2cap_chan_add(conn, chan);
+ mutex_unlock(&conn->chan_lock);
+}
- l2cap_sock_clear_timer(sk);
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
+{
+ struct l2cap_conn *conn = chan->conn;
- BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+ __clear_chan_timer(chan);
+
+ BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
if (conn) {
- struct l2cap_chan_list *l = &conn->chan_list;
- /* Unlink from channel list */
- l2cap_chan_unlink(l, sk);
- l2cap_pi(sk)->conn = NULL;
- if (!l2cap_pi(sk)->fixed_channel)
- hci_conn_put(conn->hcon);
+ struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ /* Delete from channel list */
+ list_del(&chan->list);
- read_lock(&l->lock);
- if (l2cap_pi(sk)->flush_to < l2cap_get_smallest_flushto(l))
- l2cap_set_acl_flushto(conn->hcon,
- l2cap_get_smallest_flushto(l));
- read_unlock(&l->lock);
+ l2cap_chan_put(chan);
+
+ chan->conn = NULL;
+
+ if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
+ hci_conn_drop(conn->hcon);
+
+ if (mgr && mgr->bredr_chan == chan)
+ mgr->bredr_chan = NULL;
}
- if (l2cap_pi(sk)->ampchan) {
- struct hci_chan *ampchan = l2cap_pi(sk)->ampchan;
- struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
- l2cap_pi(sk)->ampchan = NULL;
- l2cap_pi(sk)->ampcon = NULL;
- l2cap_pi(sk)->amp_id = 0;
- if (hci_chan_put(ampchan))
- ampcon->l2cap_data = NULL;
- else
- l2cap_deaggregate(ampchan, l2cap_pi(sk));
+ if (chan->hs_hchan) {
+ struct hci_chan *hs_hchan = chan->hs_hchan;
+
+ BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
+ amp_disconnect_logical_link(hs_hchan);
}
- sk->sk_state = BT_CLOSED;
- sock_set_flag(sk, SOCK_ZAPPED);
+ chan->ops->teardown(chan, err);
- if (err)
- sk->sk_err = err;
+ if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
+ return;
- if (parent) {
- bt_accept_unlink(sk);
- parent->sk_data_ready(parent, 0);
- } else
- sk->sk_state_change(sk);
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
- sk->sk_send_head = NULL;
- skb_queue_purge(TX_QUEUE(sk));
+ case L2CAP_MODE_ERTM:
+ __clear_retrans_timer(chan);
+ __clear_monitor_timer(chan);
+ __clear_ack_timer(chan);
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
- if (l2cap_pi(sk)->sdu)
- kfree_skb(l2cap_pi(sk)->sdu);
+ skb_queue_purge(&chan->srej_q);
- skb_queue_purge(SREJ_QUEUE(sk));
+ l2cap_seq_list_free(&chan->srej_list);
+ l2cap_seq_list_free(&chan->retrans_list);
- __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
- __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
- __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
+ /* fall through */
+
+ case L2CAP_MODE_STREAMING:
+ skb_queue_purge(&chan->tx_q);
+ break;
+ }
+
+ return;
+}
+
+void l2cap_chan_close(struct l2cap_chan *chan, int reason)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sock *sk = chan->sk;
+
+ BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
+ sk);
+
+ switch (chan->state) {
+ case BT_LISTEN:
+ chan->ops->teardown(chan, 0);
+ break;
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+ conn->hcon->type == ACL_LINK) {
+ __set_chan_timer(chan, sk->sk_sndtimeo);
+ l2cap_send_disconn_req(chan, reason);
+ } else
+ l2cap_chan_del(chan, reason);
+ break;
+
+ case BT_CONNECT2:
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+ conn->hcon->type == ACL_LINK) {
+ struct l2cap_conn_rsp rsp;
+ __u16 result;
+
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
+ result = L2CAP_CR_SEC_BLOCK;
+ else
+ result = L2CAP_CR_BAD_PSM;
+ l2cap_state_change(chan, BT_DISCONN);
+
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+ sizeof(rsp), &rsp);
+ }
+
+ l2cap_chan_del(chan, reason);
+ break;
+
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(chan, reason);
+ break;
+
+ default:
+ chan->ops->teardown(chan, 0);
+ break;
}
}
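
/* Illustrative sketch, not part of the patch: the reference and locking
 * discipline used when a channel is torn down from a context that does not
 * already own a reference; compare l2cap_chan_timeout() above and
 * l2cap_conn_del() later in this file. my_kill_chan() is an invented helper
 * name, shown as it might look inside l2cap_core.c.
 */
static void my_kill_chan(struct l2cap_conn *conn, struct l2cap_chan *chan,
			 int err)
{
	l2cap_chan_hold(chan);			/* pin the channel across teardown */

	mutex_lock(&conn->chan_lock);		/* protects conn->chan_l */
	l2cap_chan_lock(chan);

	l2cap_chan_close(chan, err);		/* state-dependent shutdown */

	l2cap_chan_unlock(chan);
	chan->ops->close(chan);			/* notify the owner (socket layer) */
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);			/* drop the temporary reference */
}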
-static inline u8 l2cap_get_auth_type(struct sock *sk)
+static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
- if (sk->sk_type == SOCK_RAW) {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_VERY_HIGH:
+ if (chan->chan_type == L2CAP_CHAN_RAW) {
+ switch (chan->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_DEDICATED_BONDING_MITM;
case BT_SECURITY_MEDIUM:
@@ -623,18 +681,16 @@
default:
return HCI_AT_NO_BONDING;
}
- } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
+ if (chan->sec_level == BT_SECURITY_LOW)
+ chan->sec_level = BT_SECURITY_SDP;
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH ||
- l2cap_pi(sk)->sec_level == BT_SECURITY_VERY_HIGH)
+ if (chan->sec_level == BT_SECURITY_HIGH)
return HCI_AT_NO_BONDING_MITM;
else
return HCI_AT_NO_BONDING;
} else {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_VERY_HIGH:
+ switch (chan->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
@@ -646,18 +702,17 @@
}
/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
+int l2cap_chan_check_security(struct l2cap_chan *chan)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct l2cap_conn *conn = chan->conn;
__u8 auth_type;
- auth_type = l2cap_get_auth_type(sk);
+ auth_type = l2cap_get_auth_type(chan);
- return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
- auth_type);
+ return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
-u8 l2cap_get_ident(struct l2cap_conn *conn)
+static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
u8 id;
@@ -667,46 +722,20 @@
* 200 - 254 are used by utilities like l2ping, etc.
*/
- spin_lock_bh(&conn->lock);
+ spin_lock(&conn->lock);
if (++conn->tx_ident > 128)
conn->tx_ident = 1;
id = conn->tx_ident;
- spin_unlock_bh(&conn->lock);
+ spin_unlock(&conn->lock);
return id;
}
-static void apply_fcs(struct sk_buff *skb)
-{
- size_t len;
- u16 partial_crc;
- struct sk_buff *iter;
- struct sk_buff *final_frag = skb;
-
- if (skb_has_frag_list(skb))
- len = skb_headlen(skb);
- else
- len = skb->len - L2CAP_FCS_SIZE;
-
- partial_crc = crc16(0, (u8 *) skb->data, len);
-
- skb_walk_frags(skb, iter) {
- len = iter->len;
- if (!iter->next)
- len -= L2CAP_FCS_SIZE;
-
- partial_crc = crc16(partial_crc, iter->data, len);
- final_frag = iter;
- }
-
- put_unaligned_le16(partial_crc,
- final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
-}
-
-void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
u8 flags;
@@ -724,73 +753,417 @@
else
flags = ACL_START;
- bt_cb(skb)->force_active = 1;
+ bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ skb->priority = HCI_PRIO_MAX;
- hci_send_acl(conn->hcon, NULL, skb, flags);
+ hci_send_acl(conn->hchan, skb, flags);
}
-static inline int __l2cap_no_conn_pending(struct sock *sk)
+static bool __chan_is_moving(struct l2cap_chan *chan)
{
- return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+ return chan->move_state != L2CAP_MOVE_STABLE &&
+ chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}
-static void l2cap_send_conn_req(struct sock *sk)
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
+ struct hci_conn *hcon = chan->conn->hcon;
+ u16 flags;
+
+ BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ skb->priority);
+
+ if (chan->hs_hcon && !__chan_is_moving(chan)) {
+ if (chan->hs_hchan)
+ hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
+ else
+ kfree_skb(skb);
+
+ return;
+ }
+
+ if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+ lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ hci_send_acl(chan->conn->hchan, skb, flags);
+}
+
+static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
+{
+ control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enh & L2CAP_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
+{
+ control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void __unpack_control(struct l2cap_chan *chan,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ __unpack_extended_control(get_unaligned_le32(skb->data),
+ &bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
+ } else {
+ __unpack_enhanced_control(get_unaligned_le16(skb->data),
+ &bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
+ }
+}
+
+static u32 __pack_extended_control(struct l2cap_ctrl *control)
+{
+ u32 packed;
+
+ packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
+{
+ u16 packed;
+
+ packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static inline void __pack_control(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+}
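
/* Illustrative sketch, not part of the patch: packing and decoding a 16-bit
 * enhanced control field in isolation, following the same shift-and-mask
 * pattern as __pack_enhanced_control()/__unpack_enhanced_control() above.
 * The DEMO_* constants reflect the standard enhanced control field layout;
 * the kernel's authoritative L2CAP_CTRL_* macros live in
 * include/net/bluetooth/l2cap.h.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FRAME_TYPE		0x0001	/* bit 0: 0 = I-frame, 1 = S-frame */
#define DEMO_SUPER_SHIFT	2	/* bits 2-3: supervisory function */
#define DEMO_POLL_SHIFT		4	/* bit 4: P */
#define DEMO_FINAL_SHIFT	7	/* bit 7: F */
#define DEMO_REQSEQ_SHIFT	8	/* bits 8-13: ReqSeq */

#define DEMO_SUPER_RR		0x00	/* receiver ready */

static uint16_t demo_pack_sframe(uint8_t super, uint8_t poll, uint8_t final,
				 uint8_t reqseq)
{
	uint16_t packed = DEMO_FRAME_TYPE;

	packed |= (uint16_t)super  << DEMO_SUPER_SHIFT;
	packed |= (uint16_t)poll   << DEMO_POLL_SHIFT;
	packed |= (uint16_t)final  << DEMO_FINAL_SHIFT;
	packed |= (uint16_t)reqseq << DEMO_REQSEQ_SHIFT;

	return packed;
}

int main(void)
{
	/* an RR frame with the Final bit set, acknowledging up to ReqSeq 5 */
	uint16_t ctrl = demo_pack_sframe(DEMO_SUPER_RR, 0, 1, 5);

	printf("control=0x%04x reqseq=%u final=%u\n", (unsigned int)ctrl,
	       (unsigned int)((ctrl >> DEMO_REQSEQ_SHIFT) & 0x3f),
	       (unsigned int)((ctrl >> DEMO_FINAL_SHIFT) & 0x01));
	return 0;
}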
+
+static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_HDR_SIZE;
+ else
+ return L2CAP_ENH_HDR_SIZE;
+}
+
+static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
+ u32 control)
+{
+ struct sk_buff *skb;
+ struct l2cap_hdr *lh;
+ int hlen = __ertm_hdr_size(chan);
+
+ if (chan->fcs == L2CAP_FCS_CRC16)
+ hlen += L2CAP_FCS_SIZE;
+
+ skb = bt_skb_alloc(hlen, GFP_KERNEL);
+
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
+
+ skb->priority = HCI_PRIO_MAX;
+ return skb;
+}
+
+static void l2cap_send_sframe(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ struct sk_buff *skb;
+ u32 control_field;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (!control->sframe)
+ return;
+
+ if (__chan_is_moving(chan))
+ return;
+
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
+ !control->poll)
+ control->final = 1;
+
+ if (control->super == L2CAP_SUPER_RR)
+ clear_bit(CONN_RNR_SENT, &chan->conn_state);
+ else if (control->super == L2CAP_SUPER_RNR)
+ set_bit(CONN_RNR_SENT, &chan->conn_state);
+
+ if (control->super != L2CAP_SUPER_SREJ) {
+ chan->last_acked_seq = control->reqseq;
+ __clear_ack_timer(chan);
+ }
+
+ BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
+ control->final, control->poll, control->super);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ control_field = __pack_extended_control(control);
+ else
+ control_field = __pack_enhanced_control(control);
+
+ skb = l2cap_create_sframe_pdu(chan, control_field);
+ if (!IS_ERR(skb))
+ l2cap_do_send(chan, skb);
+}
+
+static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p, poll %d", chan, poll);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.poll = poll;
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ control.super = L2CAP_SUPER_RNR;
+ else
+ control.super = L2CAP_SUPER_RR;
+
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+}
+
+static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
+{
+ return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
+}
+
+static bool __amp_capable(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ if (enable_hs &&
+ hci_amp_capable() &&
+ chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
+ conn->fixed_chan_mask & L2CAP_FC_A2MP)
+ return true;
+ else
+ return false;
+}
+
+static bool l2cap_check_efs(struct l2cap_chan *chan)
+{
+ /* Check EFS parameters */
+ return true;
+}
+
+void l2cap_send_conn_req(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
struct l2cap_conn_req req;
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- req.psm = l2cap_pi(sk)->psm;
- l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
+ req.scid = cpu_to_le16(chan->scid);
+ req.psm = chan->psm;
- l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_REQ, sizeof(req), &req);
+ chan->ident = l2cap_get_ident(conn);
+
+ set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
-static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
+static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
struct l2cap_create_chan_req req;
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- req.psm = l2cap_pi(sk)->psm;
+ req.scid = cpu_to_le16(chan->scid);
+ req.psm = chan->psm;
req.amp_id = amp_id;
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
- l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
+ chan->ident = l2cap_get_ident(chan->conn);
- l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
- L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
+ sizeof(req), &req);
}
-static void l2cap_do_start(struct sock *sk)
+static void l2cap_move_setup(struct l2cap_chan *chan)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct sk_buff *skb;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
+
+ __clear_retrans_timer(chan);
+ __clear_monitor_timer(chan);
+ __clear_ack_timer(chan);
+
+ chan->retry_count = 0;
+ skb_queue_walk(&chan->tx_q, skb) {
+ if (bt_cb(skb)->control.retries)
+ bt_cb(skb)->control.retries = 1;
+ else
+ break;
+ }
+
+ chan->expected_tx_seq = chan->buffer_seq;
+
+ clear_bit(CONN_REJ_ACT, &chan->conn_state);
+ clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ l2cap_seq_list_clear(&chan->srej_list);
+ skb_queue_purge(&chan->srej_q);
+
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ chan->rx_state = L2CAP_RX_STATE_MOVE;
+
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+}
+
+static void l2cap_move_done(struct l2cap_chan *chan)
+{
+ u8 move_role = chan->move_role;
+ BT_DBG("chan %p", chan);
+
+ chan->move_state = L2CAP_MOVE_STABLE;
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
+
+ switch (move_role) {
+ case L2CAP_MOVE_ROLE_INITIATOR:
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
+ chan->rx_state = L2CAP_RX_STATE_WAIT_F;
+ break;
+ case L2CAP_MOVE_ROLE_RESPONDER:
+ chan->rx_state = L2CAP_RX_STATE_WAIT_P;
+ break;
+ }
+}
+
+static void l2cap_chan_ready(struct l2cap_chan *chan)
+{
+ /* This clears all conf flags, including CONF_NOT_COMPLETE */
+ chan->conf_state = 0;
+ __clear_chan_timer(chan);
+
+ chan->state = BT_CONNECTED;
+
+ chan->ops->ready(chan);
+}
+
+static void l2cap_start_connection(struct l2cap_chan *chan)
+{
+ if (__amp_capable(chan)) {
+ BT_DBG("chan %p AMP capable: discover AMPs", chan);
+ a2mp_discover_amp(chan);
+ } else {
+ l2cap_send_conn_req(chan);
+ }
+}
+
+static void l2cap_do_start(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_chan_ready(chan);
+ return;
+ }
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
- if (l2cap_pi(sk)->amp_pref ==
- BT_AMP_POLICY_PREFER_AMP &&
- enable_hs &&
- conn->fc_mask & L2CAP_FC_A2MP)
- amp_create_physical(conn, sk);
- else
- l2cap_send_conn_req(sk);
+ if (l2cap_chan_check_security(chan) &&
+ __l2cap_no_conn_pending(chan)) {
+ l2cap_start_connection(chan);
}
} else {
struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- mod_timer(&conn->info_timer, jiffies +
- msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
- l2cap_send_cmd(conn, conn->info_ident,
- L2CAP_INFO_REQ, sizeof(req), &req);
+ l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+ sizeof(req), &req);
}
}
@@ -810,330 +1183,265 @@
}
}
-void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
+static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
+ struct sock *sk = chan->sk;
+ struct l2cap_conn *conn = chan->conn;
struct l2cap_disconn_req req;
if (!conn)
return;
- sk->sk_send_head = NULL;
- skb_queue_purge(TX_QUEUE(sk));
-
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
- skb_queue_purge(SREJ_QUEUE(sk));
-
- __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
- __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
- __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
+ if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
+ __clear_retrans_timer(chan);
+ __clear_monitor_timer(chan);
+ __clear_ack_timer(chan);
}
- req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_DISCONN_REQ, sizeof(req), &req);
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ l2cap_state_change(chan, BT_DISCONN);
+ return;
+ }
- sk->sk_state = BT_DISCONN;
- sk->sk_err = err;
+ req.dcid = cpu_to_le16(chan->dcid);
+ req.scid = cpu_to_le16(chan->scid);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
+ sizeof(req), &req);
+
+ lock_sock(sk);
+ __l2cap_state_change(chan, BT_DISCONN);
+ __l2cap_chan_set_err(chan, err);
+ release_sock(sk);
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sock_del_list del, *tmp1, *tmp2;
- struct sock *sk;
+ struct l2cap_chan *chan, *tmp;
BT_DBG("conn %p", conn);
- INIT_LIST_HEAD(&del.list);
+ mutex_lock(&conn->chan_lock);
- read_lock(&l->lock);
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ struct sock *sk = chan->sk;
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
+ l2cap_chan_lock(chan);
- if (sk->sk_type != SOCK_SEQPACKET &&
- sk->sk_type != SOCK_STREAM) {
- bh_unlock_sock(sk);
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ l2cap_chan_unlock(chan);
continue;
}
- if (sk->sk_state == BT_CONNECT) {
- if (!l2cap_check_security(sk) ||
- !__l2cap_no_conn_pending(sk)) {
- bh_unlock_sock(sk);
+ if (chan->state == BT_CONNECT) {
+ if (!l2cap_chan_check_security(chan) ||
+ !__l2cap_no_conn_pending(chan)) {
+ l2cap_chan_unlock(chan);
continue;
}
- if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
- conn->feat_mask)
- && l2cap_pi(sk)->conf_state &
- L2CAP_CONF_STATE2_DEVICE) {
- tmp1 = kzalloc(sizeof(struct sock_del_list),
- GFP_ATOMIC);
- tmp1->sk = sk;
- list_add_tail(&tmp1->list, &del.list);
- bh_unlock_sock(sk);
+ if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
+ && test_bit(CONF_STATE2_DEVICE,
+ &chan->conf_state)) {
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
continue;
}
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+ l2cap_start_connection(chan);
- if (l2cap_pi(sk)->amp_pref ==
- BT_AMP_POLICY_PREFER_AMP &&
- enable_hs &&
- conn->fc_mask & L2CAP_FC_A2MP)
- amp_create_physical(conn, sk);
- else
- l2cap_send_conn_req(sk);
-
- } else if (sk->sk_state == BT_CONNECT2) {
+ } else if (chan->state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
char buf[128];
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
- if (l2cap_check_security(sk)) {
- if (bt_sk(sk)->defer_setup) {
- struct sock *parent = bt_sk(sk)->parent;
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
- if (parent)
- parent->sk_data_ready(parent, 0);
+ if (l2cap_chan_check_security(chan)) {
+ lock_sock(sk);
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+ chan->ops->defer(chan);
} else {
- sk->sk_state = BT_CONFIG;
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ __l2cap_state_change(chan, BT_CONFIG);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
}
+ release_sock(sk);
} else {
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
}
- if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
- l2cap_pi(sk)->amp_id) {
- amp_accept_physical(conn,
- l2cap_pi(sk)->amp_id, sk);
- bh_unlock_sock(sk);
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
+ rsp.result != L2CAP_CR_SUCCESS) {
+ l2cap_chan_unlock(chan);
continue;
}
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
- rsp.result != L2CAP_CR_SUCCESS) {
- bh_unlock_sock(sk);
- continue;
- }
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
}
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
}
- read_unlock(&l->lock);
-
- list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
- bh_lock_sock(tmp1->sk);
- __l2cap_sock_close(tmp1->sk, ECONNRESET);
- bh_unlock_sock(tmp1->sk);
- list_del(&tmp1->list);
- kfree(tmp1);
- }
+ mutex_unlock(&conn->chan_lock);
}
-/* Find socket with fixed cid with given source and destination bdaddrs.
- * Direction of the req/rsp must match.
- */
-struct sock *l2cap_find_sock_by_fixed_cid_and_dir(__le16 cid, bdaddr_t *src,
- bdaddr_t *dst, int incoming)
-{
- struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
-
- BT_DBG(" %d", incoming);
-
- read_lock(&l2cap_sk_list.lock);
-
- sk_for_each(sk, node, &l2cap_sk_list.head) {
-
- BT_DBG("sock %p scid %d check cid : %d ", sk, l2cap_pi(sk)->scid, cid);
-
- if (incoming && !l2cap_pi(sk)->incoming)
- continue;
-
- if (!incoming && l2cap_pi(sk)->incoming)
- continue;
-
- if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
- /* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
- break;
-
- /* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- sk1 = sk;
- }
- }
-
- read_unlock(&l2cap_sk_list.lock);
-
- return node ? sk : sk1;
-}
-
-/* Find socket with cid and source bdaddr.
+/* Find socket with cid and source/destination bdaddr.
* Returns closest match, locked.
*/
-static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
- struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
+ struct l2cap_chan *c, *c1 = NULL;
- read_lock(&l2cap_sk_list.lock);
+ read_lock(&chan_list_lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- if (state && sk->sk_state != state)
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
+
+ if (state && c->state != state)
continue;
- if (l2cap_pi(sk)->scid == cid) {
+ if (c->scid == cid) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
- break;
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- sk1 = sk;
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
+ c1 = c;
}
}
- read_unlock(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
- return node ? sk : sk1;
+ return c1;
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
- struct l2cap_chan_list *list = &conn->chan_list;
- struct sock *parent, *uninitialized_var(sk);
+ struct sock *parent, *sk;
+ struct l2cap_chan *chan, *pchan;
BT_DBG("");
/* Check if we have socket listening on cid */
- parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
- conn->src);
- if (!parent)
+ pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+ conn->src, conn->dst);
+ if (!pchan)
return;
- bh_lock_sock(parent);
+ parent = pchan->sk;
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
- goto clean;
- }
+ lock_sock(parent);
- sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
- if (!sk)
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan)
goto clean;
- write_lock_bh(&list->lock);
+ sk = chan->sk;
hci_conn_hold(conn->hcon);
+ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
- l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
- l2cap_pi(sk)->incoming = 1;
- bt_accept_enqueue(parent, sk);
+ l2cap_chan_add(conn, chan);
- __l2cap_chan_add(conn, sk);
-
- sk->sk_state = BT_CONNECTED;
- parent->sk_data_ready(parent, 0);
-
- write_unlock_bh(&list->lock);
+ l2cap_chan_ready(chan);
clean:
- bh_unlock_sock(parent);
+ release_sock(parent);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sock *sk;
+ struct l2cap_chan *chan;
+ struct hci_conn *hcon = conn->hcon;
BT_DBG("conn %p", conn);
- if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+ if (!hcon->out && hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
- read_lock(&l->lock);
+ if (hcon->out && hcon->type == LE_LINK)
+ smp_conn_security(hcon, hcon->pending_sec_level);
- if (l->head) {
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
+ mutex_lock(&conn->chan_lock);
- if (conn->hcon->type == LE_LINK) {
- u8 sec_level = l2cap_pi(sk)->sec_level;
- u8 pending_sec = conn->hcon->pending_sec_level;
+ list_for_each_entry(chan, &conn->chan_l, list) {
- if (pending_sec > sec_level)
- sec_level = pending_sec;
+ l2cap_chan_lock(chan);
- if (smp_conn_security(conn, sec_level))
- l2cap_chan_ready(sk);
-
- hci_conn_put(conn->hcon);
-
- } else if (sk->sk_type != SOCK_SEQPACKET &&
- sk->sk_type != SOCK_STREAM) {
- l2cap_sock_clear_timer(sk);
- sk->sk_state = BT_CONNECTED;
- sk->sk_state_change(sk);
- } else if (sk->sk_state == BT_CONNECT)
- l2cap_do_start(sk);
-
- bh_unlock_sock(sk);
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
}
- } else if (conn->hcon->type == LE_LINK) {
- smp_conn_security(conn, BT_SECURITY_HIGH);
+
+ if (hcon->type == LE_LINK) {
+ if (smp_conn_security(hcon, chan->sec_level))
+ l2cap_chan_ready(chan);
+
+ } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ struct sock *sk = chan->sk;
+ __clear_chan_timer(chan);
+ lock_sock(sk);
+ __l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state_change(sk);
+ release_sock(sk);
+
+ } else if (chan->state == BT_CONNECT)
+ l2cap_do_start(chan);
+
+ l2cap_chan_unlock(chan);
}
- read_unlock(&l->lock);
-
- if (conn->hcon->out && conn->hcon->type == LE_LINK)
- l2cap_le_conn_ready(conn);
+ mutex_unlock(&conn->chan_lock);
}
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sock *sk;
+ struct l2cap_chan *chan;
BT_DBG("conn %p", conn);
- read_lock(&l->lock);
+ mutex_lock(&conn->chan_lock);
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- if (l2cap_pi(sk)->force_reliable)
- sk->sk_err = err;
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
+ l2cap_chan_set_err(chan, err);
}
- read_unlock(&l->lock);
+ mutex_unlock(&conn->chan_lock);
}
-static void l2cap_info_timeout(unsigned long arg)
+static void l2cap_info_timeout(struct work_struct *work)
{
- struct l2cap_conn *conn = (void *) arg;
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ info_timer.work);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -1141,26 +1449,185 @@
l2cap_conn_start(conn);
}
-static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
+/*
+ * l2cap_user
+ * External modules can register l2cap_user objects on l2cap_conn. The ->probe
+ * callback is called during registration. The ->remove callback is called
+ * during unregistration.
+ * An l2cap_user object is unregistered either explicitly or implicitly when
+ * the underlying l2cap_conn object is deleted. This guarantees that conn->hcon,
+ * conn->hchan, etc. are valid as long as the remove callback hasn't been called.
+ * External modules must own a reference to the l2cap_conn object if they intend
+ * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
+ * any time if they don't.
+ */
+
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
+{
+ struct hci_dev *hdev = conn->hcon->hdev;
+ int ret;
+
+ /* We need to check whether l2cap_conn is registered. If it is not, we
+ * must not register the l2cap_user. l2cap_conn_del() unregisters
+ * l2cap_conn objects, but doesn't provide its own locking. Instead, it
+ * relies on the parent hci_conn object to be locked. This itself relies
+ * on the hci_dev object to be locked. So we must lock the hci device
+ * here, too. */
+
+ hci_dev_lock(hdev);
+
+ if (user->list.next || user->list.prev) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* conn->hchan is NULL after l2cap_conn_del() was called */
+ if (!conn->hchan) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = user->probe(conn, user);
+ if (ret)
+ goto out_unlock;
+
+ list_add(&user->list, &conn->users);
+ ret = 0;
+
+out_unlock:
+ hci_dev_unlock(hdev);
+ return ret;
+}
+EXPORT_SYMBOL(l2cap_register_user);
+
+void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
+{
+ struct hci_dev *hdev = conn->hcon->hdev;
+
+ hci_dev_lock(hdev);
+
+ if (!user->list.next || !user->list.prev)
+ goto out_unlock;
+
+ list_del(&user->list);
+ user->list.next = NULL;
+ user->list.prev = NULL;
+ user->remove(conn, user);
+
+out_unlock:
+ hci_dev_unlock(hdev);
+}
+EXPORT_SYMBOL(l2cap_unregister_user);
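
/* Illustrative sketch, not part of the patch: how an external kernel module
 * might attach to an L2CAP connection through the l2cap_user interface added
 * above. The my_* names are invented; l2cap_conn_get()/l2cap_conn_put() are
 * assumed to be provided by the same backport (conn->ref is initialised in
 * l2cap_conn_add() below).
 */
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>

static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* conn->hcon and conn->hchan stay valid until my_remove() has run */
	BT_DBG("bound to conn %p", conn);
	return 0;
}

static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* called on explicit unregister or when the connection is deleted */
	BT_DBG("unbound from conn %p", conn);
}

static struct l2cap_user my_user = {
	.probe  = my_probe,
	.remove = my_remove,
};

static int my_attach(struct l2cap_conn *conn)
{
	int err;

	/* hold a conn reference for as long as we may unregister explicitly */
	l2cap_conn_get(conn);

	err = l2cap_register_user(conn, &my_user);
	if (err)
		l2cap_conn_put(conn);

	return err;
}

static void my_detach(struct l2cap_conn *conn)
{
	l2cap_unregister_user(conn, &my_user);
	l2cap_conn_put(conn);
}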
+
+static void l2cap_unregister_all_users(struct l2cap_conn *conn)
+{
+ struct l2cap_user *user;
+
+ while (!list_empty(&conn->users)) {
+ user = list_first_entry(&conn->users, struct l2cap_user, list);
+ list_del(&user->list);
+ user->list.next = NULL;
+ user->list.prev = NULL;
+ user->remove(conn, user);
+ }
+}
+
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_chan *chan, *l;
- if (conn || status)
+ if (!conn)
+ return;
+
+ BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+ kfree_skb(conn->rx_skb);
+
+ l2cap_unregister_all_users(conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ /* Kill channels */
+ list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ l2cap_chan_del(chan, err);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ l2cap_chan_put(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ hci_chan_del(conn->hchan);
+
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+ cancel_delayed_work_sync(&conn->info_timer);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
+ cancel_delayed_work_sync(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
+
+ hcon->l2cap_data = NULL;
+ conn->hchan = NULL;
+ l2cap_conn_put(conn);
+}
+
+static void security_timeout(struct work_struct *work)
+{
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ security_timer.work);
+
+ BT_DBG("conn %p", conn);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+ smp_chan_destroy(conn);
+ l2cap_conn_del(conn->hcon, ETIMEDOUT);
+ }
+}
+
+static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_chan *hchan;
+
+ if (conn)
return conn;
- conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
- if (!conn)
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
return NULL;
+ conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
+ if (!conn) {
+ hci_chan_del(hchan);
+ return NULL;
+ }
+
+ kref_init(&conn->ref);
hcon->l2cap_data = conn;
conn->hcon = hcon;
+ hci_conn_get(conn->hcon);
+ conn->hchan = hchan;
- BT_DBG("hcon %p conn %p", hcon, conn);
+ BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
- if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
- conn->mtu = hcon->hdev->le_mtu;
- else
+ switch (hcon->type) {
+ case LE_LINK:
+ if (hcon->hdev->le_mtu) {
+ conn->mtu = hcon->hdev->le_mtu;
+ break;
+ }
+ /* fall through */
+ default:
conn->mtu = hcon->hdev->acl_mtu;
+ break;
+ }
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
@@ -1168,218 +1635,235 @@
conn->feat_mask = 0;
spin_lock_init(&conn->lock);
- rwlock_init(&conn->chan_list.lock);
+ mutex_init(&conn->chan_lock);
+
+ INIT_LIST_HEAD(&conn->chan_l);
+ INIT_LIST_HEAD(&conn->users);
if (hcon->type == LE_LINK)
- setup_timer(&hcon->smp_timer, smp_timeout,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
else
- setup_timer(&conn->info_timer, l2cap_info_timeout,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
return conn;
}
-void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process)
+static void l2cap_conn_free(struct kref *ref)
{
- struct l2cap_conn *conn = hcon->l2cap_data;
- struct sock *sk;
- struct sock *next;
+ struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
- if (!conn)
- return;
-
- BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
- if ((conn->hcon == hcon) && (conn->rx_skb))
- kfree_skb(conn->rx_skb);
-
- BT_DBG("conn->hcon %p", conn->hcon);
-
- /* Kill channels */
- for (sk = conn->chan_list.head; sk; ) {
- BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
- if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
- next = l2cap_pi(sk)->next_c;
- if (is_process)
- lock_sock(sk);
- else
- bh_lock_sock(sk);
- l2cap_chan_del(sk, err);
- if (is_process)
- release_sock(sk);
- else
- bh_unlock_sock(sk);
- l2cap_sock_kill(sk);
- sk = next;
- } else
- sk = l2cap_pi(sk)->next_c;
- }
-
- if (conn->hcon == hcon) {
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
- del_timer_sync(&conn->info_timer);
-
- hcon->l2cap_data = NULL;
-
- kfree(conn);
- }
- att_chn_params.conn = NULL;
- smp_chn_params.conn = NULL;
- BT_DBG("att_chn_params.conn set to NULL");
+ hci_conn_put(conn->hcon);
+ kfree(conn);
}
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
+void l2cap_conn_get(struct l2cap_conn *conn)
{
- struct l2cap_chan_list *l = &conn->chan_list;
- write_lock_bh(&l->lock);
- __l2cap_chan_add(conn, sk);
- write_unlock_bh(&l->lock);
+ kref_get(&conn->ref);
}
+EXPORT_SYMBOL(l2cap_conn_get);
+
+void l2cap_conn_put(struct l2cap_conn *conn)
+{
+ kref_put(&conn->ref, l2cap_conn_free);
+}
+EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */
-/* Find socket with psm and source bdaddr.
+/* Find socket with psm and source / destination bdaddr.
* Returns closest match.
*/
-static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
- struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
+ struct l2cap_chan *c, *c1 = NULL;
- read_lock(&l2cap_sk_list.lock);
+ read_lock(&chan_list_lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- if (state && sk->sk_state != state)
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
+
+ if (state && c->state != state)
continue;
- if (l2cap_pi(sk)->psm == psm) {
+ if (c->psm == psm) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
- break;
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- sk1 = sk;
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
+ c1 = c;
}
}
- read_unlock(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
- return node ? sk : sk1;
+ return c1;
}
-int l2cap_do_connect(struct sock *sk)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type)
{
+ struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
- bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
__u8 auth_type;
int err;
- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- l2cap_pi(sk)->psm);
+ BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
+ dst_type, __le16_to_cpu(psm));
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- auth_type = l2cap_get_auth_type(sk);
+ l2cap_chan_lock(chan);
- if (l2cap_pi(sk)->fixed_channel) {
- /* Fixed channels piggyback on existing ACL connections */
- hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (!hcon || !hcon->l2cap_data) {
- err = -ENOTCONN;
- goto done;
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ switch (chan->state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ err = 0;
+ goto done;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ lock_sock(sk);
+ bacpy(&bt_sk(sk)->dst, dst);
+ release_sock(sk);
+
+ chan->psm = psm;
+ chan->dcid = cid;
+
+ auth_type = l2cap_get_auth_type(chan);
+
+ if (chan->dcid == L2CAP_CID_LE_DATA)
+ hcon = hci_connect(hdev, LE_LINK, 0, dst, dst_type,
+ chan->sec_level, auth_type);
+ else
+ hcon = hci_connect(hdev, ACL_LINK, 0, dst, dst_type,
+ chan->sec_level, auth_type);
+
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto done;
+ }
+
+ conn = l2cap_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (hcon->type == LE_LINK) {
+ err = 0;
+
+ if (!list_empty(&conn->chan_l)) {
+ err = -EBUSY;
+ hci_conn_drop(hcon);
}
- conn = hcon->l2cap_data;
- } else {
- if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_le_connect(hdev, 0, dst,
- l2cap_pi(sk)->sec_level, auth_type,
- &bt_sk(sk)->le_params);
- else
- hcon = hci_connect(hdev, ACL_LINK, 0, dst,
- l2cap_pi(sk)->sec_level, auth_type);
-
- if (IS_ERR(hcon)) {
- err = PTR_ERR(hcon);
+ if (err)
goto done;
- }
-
- conn = l2cap_conn_add(hcon, 0);
- if (!conn) {
- hci_conn_put(hcon);
- err = -ENOMEM;
- goto done;
- }
}
/* Update source addr of the socket */
bacpy(src, conn->src);
- l2cap_chan_add(conn, sk);
+ l2cap_chan_unlock(chan);
+ l2cap_chan_add(conn, chan);
+ l2cap_chan_lock(chan);
- if ((l2cap_pi(sk)->fixed_channel) ||
- (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
- hcon->state == BT_CONNECTED)) {
- sk->sk_state = BT_CONNECTED;
- sk->sk_state_change(sk);
- } else {
- sk->sk_state = BT_CONNECT;
- /* If we have valid LE Params, let timeout override default */
- if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
- l2cap_sock_le_params_valid(&bt_sk(sk)->le_params)) {
- u16 timeout = bt_sk(sk)->le_params.conn_timeout;
+ l2cap_state_change(chan, BT_CONNECT);
+ __set_chan_timer(chan, sk->sk_sndtimeo);
- if (timeout)
- l2cap_sock_set_timer(sk,
- msecs_to_jiffies(timeout*1000));
+ if (hcon->state == BT_CONNECTED) {
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ __clear_chan_timer(chan);
+ if (l2cap_chan_check_security(chan))
+ l2cap_state_change(chan, BT_CONNECTED);
} else
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
- sk->sk_state_change(sk);
-
- if (hcon->state == BT_CONNECTED) {
- if (sk->sk_type != SOCK_SEQPACKET &&
- sk->sk_type != SOCK_STREAM) {
- l2cap_sock_clear_timer(sk);
- if (l2cap_check_security(sk)) {
- sk->sk_state = BT_CONNECTED;
- sk->sk_state_change(sk);
- }
- } else
- l2cap_do_start(sk);
- }
+ l2cap_do_start(chan);
}
err = 0;
done:
- hci_dev_unlock_bh(hdev);
+ l2cap_chan_unlock(chan);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
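The PSM check near the top of l2cap_chan_connect() encodes the L2CAP rule that the least significant octet of a PSM must be odd and bit 0 of the most significant octet must be zero. A standalone restatement with a few sample values; the helper name is illustrative only, not part of this patch:

static bool psm_is_valid(u16 psm)	/* psm in host byte order */
{
	/* low byte must be odd, bit 0 of the high byte must be clear */
	return (psm & 0x0101) == 0x0001;
}

/*
 * psm_is_valid(0x0001) -> true   (SDP)
 * psm_is_valid(0x0003) -> true   (RFCOMM)
 * psm_is_valid(0x0002) -> false  (even low byte)
 * psm_is_valid(0x0101) -> false  (bit 0 of high byte set)
 */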
int __l2cap_wait_ack(struct sock *sk)
{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
int timeo = HZ/5;
add_wait_queue(sk_sleep(sk), &wait);
- while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
- atomic_read(&l2cap_pi(sk)->ertm_queued)) {
- set_current_state(TASK_INTERRUPTIBLE);
-
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (chan->unacked_frames > 0 && chan->conn) {
if (!timeo)
timeo = HZ/5;
@@ -1391,6 +1875,7 @@
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
@@ -1401,1010 +1886,504 @@
return err;
}
-static void l2cap_ertm_tx_worker(struct work_struct *work)
+static void l2cap_monitor_timeout(struct work_struct *work)
{
- struct l2cap_pinfo *pi =
- container_of(work, struct l2cap_pinfo, tx_work);
- struct sock *sk = (struct sock *)pi;
- BT_DBG("%p", pi);
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ monitor_timer.work);
- lock_sock(sk);
- l2cap_ertm_send(sk);
- release_sock(sk);
- sock_put(sk);
+ BT_DBG("chan %p", chan);
+
+ l2cap_chan_lock(chan);
+
+ if (!chan->conn) {
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
-static void l2cap_skb_destructor(struct sk_buff *skb)
+static void l2cap_retrans_timeout(struct work_struct *work)
{
- struct sock *sk = skb->sk;
- int queued;
- int keep_sk = 0;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ retrans_timer.work);
- queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
- if (queued < L2CAP_MIN_ERTM_QUEUED)
- keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
+ BT_DBG("chan %p", chan);
- if (!keep_sk)
- sock_put(sk);
+ l2cap_chan_lock(chan);
+
+ if (!chan->conn) {
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
-void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+static void l2cap_streaming_send(struct l2cap_chan *chan,
+ struct sk_buff_head *skbs)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ struct l2cap_ctrl *control;
- BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
+ BT_DBG("chan %p, skbs %p", chan, skbs);
- if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
- pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
- BT_DBG("Sending on AMP connection %p %p",
- pi->ampcon, pi->ampchan);
- if (pi->ampchan)
- hci_send_acl(pi->ampcon, pi->ampchan, skb,
- ACL_COMPLETE);
- else
- kfree_skb(skb);
- } else {
- u16 flags;
+ if (__chan_is_moving(chan))
+ return;
- if (!(pi->conn)) {
- kfree_skb(skb);
- return;
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+
+ while (!skb_queue_empty(&chan->tx_q)) {
+
+ skb = skb_dequeue(&chan->tx_q);
+
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
+
+ control->reqseq = 0;
+ control->txseq = chan->next_tx_seq;
+
+ __pack_control(chan, control, skb);
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
- bt_cb(skb)->force_active = pi->force_active;
- BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
+ l2cap_do_send(chan, skb);
- if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
- !l2cap_pi(sk)->flushable)
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
+ BT_DBG("Sent txseq %u", control->txseq);
- hci_send_acl(pi->conn->hcon, NULL, skb, flags);
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->frames_sent++;
}
}
-int l2cap_ertm_send(struct sock *sk)
+static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct bt_l2cap_control *control;
+ struct l2cap_ctrl *control;
int sent = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("chan %p", chan);
- if (sk->sk_state != BT_CONNECTED)
+ if (chan->state != BT_CONNECTED)
return -ENOTCONN;
- if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return 0;
- if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
- pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ if (__chan_is_moving(chan))
return 0;
- while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
- atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
- (pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {
+ while (chan->tx_send_head &&
+ chan->unacked_frames < chan->remote_tx_win &&
+ chan->tx_state == L2CAP_TX_STATE_XMIT) {
- skb = sk->sk_send_head;
+ skb = chan->tx_send_head;
- bt_cb(skb)->retries = 1;
+ bt_cb(skb)->control.retries = 1;
control = &bt_cb(skb)->control;
- if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
control->final = 1;
- pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
- }
- control->reqseq = pi->buffer_seq;
- pi->last_acked_seq = pi->buffer_seq;
- control->txseq = pi->next_tx_seq;
- if (pi->extended_control) {
- put_unaligned_le32(__pack_extended_control(control),
- skb->data + L2CAP_HDR_SIZE);
- } else {
- put_unaligned_le16(__pack_enhanced_control(control),
- skb->data + L2CAP_HDR_SIZE);
- }
+ control->reqseq = chan->buffer_seq;
+ chan->last_acked_seq = chan->buffer_seq;
+ control->txseq = chan->next_tx_seq;
- if (pi->fcs == L2CAP_FCS_CRC16)
- apply_fcs(skb);
+ __pack_control(chan, control, skb);
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
/* Clone after data has been modified. Data is assumed to be
read-only (for locking purposes) on cloned sk_buffs.
*/
- tx_skb = skb_clone(skb, GFP_ATOMIC);
+ tx_skb = skb_clone(skb, GFP_KERNEL);
if (!tx_skb)
break;
- sock_hold(sk);
- tx_skb->sk = sk;
- tx_skb->destructor = l2cap_skb_destructor;
- atomic_inc(&pi->ertm_queued);
+ __set_retrans_timer(chan);
- l2cap_ertm_start_retrans_timer(pi);
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->unacked_frames++;
+ chan->frames_sent++;
+ sent++;
- pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
- pi->unacked_frames += 1;
- pi->frames_sent += 1;
- sent += 1;
-
- if (skb_queue_is_last(TX_QUEUE(sk), skb))
- sk->sk_send_head = NULL;
+ if (skb_queue_is_last(&chan->tx_q, skb))
+ chan->tx_send_head = NULL;
else
- sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+ chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
- l2cap_do_send(sk, tx_skb);
- BT_DBG("Sent txseq %d", (int)control->txseq);
+ l2cap_do_send(chan, tx_skb);
+ BT_DBG("Sent txseq %u", control->txseq);
}
- BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
- (int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
- atomic_read(&pi->ertm_queued));
+ BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
+ chan->unacked_frames, skb_queue_len(&chan->tx_q));
return sent;
}
-int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
+static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
+ struct l2cap_ctrl control;
struct sk_buff *skb;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct bt_l2cap_control *control;
- int sent = 0;
+ struct sk_buff *tx_skb;
+ u16 seq;
- BT_DBG("sk %p, skbs %p", sk, skbs);
+ BT_DBG("chan %p", chan);
- if (sk->sk_state != BT_CONNECTED)
- return -ENOTCONN;
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return;
- if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
- pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
- return 0;
+ if (__chan_is_moving(chan))
+ return;
- skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
+ while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&chan->retrans_list);
- BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
- while (!skb_queue_empty(TX_QUEUE(sk))) {
-
- skb = skb_dequeue(TX_QUEUE(sk));
-
- BT_DBG("skb %p", skb);
-
- bt_cb(skb)->retries = 1;
- control = &bt_cb(skb)->control;
-
- BT_DBG("control %p", control);
-
- control->reqseq = 0;
- control->txseq = pi->next_tx_seq;
-
- if (pi->extended_control) {
- put_unaligned_le32(__pack_extended_control(control),
- skb->data + L2CAP_HDR_SIZE);
- } else {
- put_unaligned_le16(__pack_enhanced_control(control),
- skb->data + L2CAP_HDR_SIZE);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
+ if (!skb) {
+ BT_DBG("Error: Can't retransmit seq %d, frame missing",
+ seq);
+ continue;
}
- if (pi->fcs == L2CAP_FCS_CRC16)
- apply_fcs(skb);
+ bt_cb(skb)->control.retries++;
+ control = bt_cb(skb)->control;
- l2cap_do_send(sk, skb);
-
- BT_DBG("Sent txseq %d", (int)control->txseq);
-
- pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
- pi->frames_sent += 1;
- sent += 1;
- }
-
- BT_DBG("Sent %d", sent);
-
- return 0;
-}
-
-static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
-{
- while (len > 0) {
- if (iv->iov_len) {
- int copy = min_t(unsigned int, len, iv->iov_len);
- memcpy(kdata, iv->iov_base, copy);
- len -= copy;
- kdata += copy;
- iv->iov_base += copy;
- iv->iov_len -= copy;
+ if (chan->max_tx != 0 &&
+ bt_cb(skb)->control.retries > chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
}
- iv++;
- }
- return 0;
-}
-
-static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
- int len, int count, struct sk_buff *skb,
- int reseg)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff **frag;
- struct sk_buff *final;
- int err, sent = 0;
-
- BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
- msg, (int)len, (int)count, skb);
-
- if (!conn)
- return -ENOTCONN;
-
- /* When resegmenting, data is copied from kernel space */
- if (reseg) {
- err = memcpy_fromkvec(skb_put(skb, count),
- (struct kvec *) msg->msg_iov, count);
- } else {
- err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
- count);
- }
-
- if (err)
- return -EFAULT;
-
- sent += count;
- len -= count;
- final = skb;
-
- /* Continuation fragments (no L2CAP header) */
- frag = &skb_shinfo(skb)->frag_list;
- while (len) {
- int skblen;
- count = min_t(unsigned int, conn->mtu, len);
-
- /* Add room for the FCS if it fits */
- if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
- len + L2CAP_FCS_SIZE <= conn->mtu)
- skblen = count + L2CAP_FCS_SIZE;
+ control.reqseq = chan->buffer_seq;
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+ control.final = 1;
else
- skblen = count;
+ control.final = 0;
- /* Don't use bt_skb_send_alloc() while resegmenting, since
- * it is not ok to block.
- */
- if (reseg) {
- *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
- if (*frag)
- skb_set_owner_w(*frag, sk);
+ if (skb_cloned(skb)) {
+ /* Cloned sk_buffs are read-only, so we need a
+ * writeable copy
+ */
+ tx_skb = skb_copy(skb, GFP_KERNEL);
} else {
- *frag = bt_skb_send_alloc(sk, skblen,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ tx_skb = skb_clone(skb, GFP_KERNEL);
}
- if (!*frag)
- return -EFAULT;
+ if (!tx_skb) {
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
- /* When resegmenting, data is copied from kernel space */
- if (reseg) {
- err = memcpy_fromkvec(skb_put(*frag, count),
- (struct kvec *) msg->msg_iov,
- count);
+ /* Update skb contents */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
} else {
- err = memcpy_fromiovec(skb_put(*frag, count),
- msg->msg_iov, count);
+ put_unaligned_le16(__pack_enhanced_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
}
- if (err)
- return -EFAULT;
-
- sent += count;
- len -= count;
-
- final = *frag;
-
- frag = &(*frag)->next;
- }
-
- if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
- if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
- if (reseg) {
- *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
- GFP_ATOMIC);
- if (*frag)
- skb_set_owner_w(*frag, sk);
- } else {
- *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
- }
-
- if (!*frag)
- return -EFAULT;
-
- final = *frag;
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
+ put_unaligned_le16(fcs, skb_put(tx_skb,
+ L2CAP_FCS_SIZE));
}
- skb_put(final, L2CAP_FCS_SIZE);
- }
+ l2cap_do_send(chan, tx_skb);
- return sent;
+ BT_DBG("Resent txseq %d", control.txseq);
+
+ chan->last_acked_seq = chan->buffer_seq;
+ }
}
-struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+static void l2cap_retransmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
- struct l2cap_hdr *lh;
+ BT_DBG("chan %p, control %p", chan, control);
- BT_DBG("sk %p len %d", sk, (int)len);
-
- count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return ERR_PTR(err);
-
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
-
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
- if (unlikely(err < 0)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
- return skb;
+ l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
+ l2cap_ertm_resend(chan);
}
-struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE;
- struct l2cap_hdr *lh;
-
- BT_DBG("sk %p len %d", sk, (int)len);
-
- count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return ERR_PTR(err);
-
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
- if (unlikely(err < 0)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
- return skb;
-}
-
-struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
- struct msghdr *msg, size_t len,
- u16 sdulen, int reseg)
+static void l2cap_retransmit_all(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
struct sk_buff *skb;
- int err, count, hlen;
- int reserve = 0;
- struct l2cap_hdr *lh;
- u8 fcs = l2cap_pi(sk)->fcs;
- if (l2cap_pi(sk)->extended_control)
- hlen = L2CAP_EXTENDED_HDR_SIZE;
- else
- hlen = L2CAP_ENHANCED_HDR_SIZE;
+ BT_DBG("chan %p, control %p", chan, control);
- if (sdulen)
- hlen += L2CAP_SDULEN_SIZE;
+ if (control->poll)
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
- if (fcs == L2CAP_FCS_CRC16)
- hlen += L2CAP_FCS_SIZE;
+ l2cap_seq_list_clear(&chan->retrans_list);
- BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
- sk, msg, (int)len, (int)sdulen, hlen);
-
- count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
-
- /* Allocate extra headroom for Qualcomm PAL. This is only
- * necessary in two places (here and when creating sframes)
- * because only unfragmented iframes and sframes are sent
- * using AMP controllers.
- */
- if (l2cap_pi(sk)->ampcon &&
- l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
- reserve = BT_SKB_RESERVE_80211;
-
- /* Don't use bt_skb_send_alloc() while resegmenting, since
- * it is not ok to block.
- */
- if (reseg) {
- skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
- if (skb)
- skb_set_owner_w(skb, sk);
- } else {
- skb = bt_skb_send_alloc(sk, count + hlen + reserve,
- msg->msg_flags & MSG_DONTWAIT, &err);
- }
- if (!skb)
- return ERR_PTR(err);
-
- if (reserve)
- skb_reserve(skb, reserve);
-
- bt_cb(skb)->control.fcs = fcs;
-
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
-
- /* Control header is populated later */
- if (l2cap_pi(sk)->extended_control)
- put_unaligned_le32(0, skb_put(skb, 4));
- else
- put_unaligned_le16(0, skb_put(skb, 2));
-
- if (sdulen)
- put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
-
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
- if (unlikely(err < 0)) {
- BT_DBG("err %d", err);
- kfree_skb(skb);
- return ERR_PTR(err);
- }
-
- bt_cb(skb)->retries = 0;
- return skb;
-}
-
-static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
-{
- struct l2cap_pinfo *pi;
- struct sk_buff *acked_skb;
- u16 ackseq;
-
- BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
-
- pi = l2cap_pi(sk);
-
- if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- BT_DBG("expected_ack_seq %d, unacked_frames %d",
- (int) pi->expected_ack_seq, (int) pi->unacked_frames);
-
- for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
- ackseq = __next_seq(ackseq, pi)) {
-
- acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
- if (acked_skb) {
- skb_unlink(acked_skb, TX_QUEUE(sk));
- kfree_skb(acked_skb);
- pi->unacked_frames--;
+ if (chan->unacked_frames) {
+ skb_queue_walk(&chan->tx_q, skb) {
+ if (bt_cb(skb)->control.txseq == control->reqseq ||
+ skb == chan->tx_send_head)
+ break;
}
+
+ skb_queue_walk_from(&chan->tx_q, skb) {
+ if (skb == chan->tx_send_head)
+ break;
+
+ l2cap_seq_list_append(&chan->retrans_list,
+ bt_cb(skb)->control.txseq);
+ }
+
+ l2cap_ertm_resend(chan);
}
-
- pi->expected_ack_seq = reqseq;
-
- if (pi->unacked_frames == 0)
- l2cap_ertm_stop_retrans_timer(pi);
-
- BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
}
-static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
+static void l2cap_send_ack(struct l2cap_chan *chan)
{
- struct sk_buff *skb;
- int len;
- int reserve = 0;
- struct l2cap_hdr *lh;
-
- if (l2cap_pi(sk)->extended_control)
- len = L2CAP_EXTENDED_HDR_SIZE;
- else
- len = L2CAP_ENHANCED_HDR_SIZE;
-
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
- len += L2CAP_FCS_SIZE;
-
- /* Allocate extra headroom for Qualcomm PAL */
- if (l2cap_pi(sk)->ampcon &&
- l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
- reserve = BT_SKB_RESERVE_80211;
-
- skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
-
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- if (reserve)
- skb_reserve(skb, reserve);
-
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
-
- if (l2cap_pi(sk)->extended_control)
- put_unaligned_le32(control, skb_put(skb, 4));
- else
- put_unaligned_le16(control, skb_put(skb, 2));
-
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
- put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
- }
-
- return skb;
-}
-
-static void l2cap_ertm_send_sframe(struct sock *sk,
- struct bt_l2cap_control *control)
-{
- struct l2cap_pinfo *pi;
- struct sk_buff *skb;
- u32 control_field;
-
- BT_DBG("sk %p, control %p", sk, control);
-
- if (control->frame_type != 's')
- return;
-
- pi = l2cap_pi(sk);
-
- if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
- pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
- pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
- BT_DBG("AMP error - attempted S-Frame send during AMP move");
- return;
- }
-
- if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
- control->final = 1;
- pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
- }
-
- if (control->super == L2CAP_SFRAME_RR)
- pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
- else if (control->super == L2CAP_SFRAME_RNR)
- pi->conn_state |= L2CAP_CONN_SENT_RNR;
-
- if (control->super != L2CAP_SFRAME_SREJ) {
- pi->last_acked_seq = control->reqseq;
- l2cap_ertm_stop_ack_timer(pi);
- }
-
- BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
- (int) control->final, (int) control->poll,
- (int) control->super);
-
- if (pi->extended_control)
- control_field = __pack_extended_control(control);
- else
- control_field = __pack_enhanced_control(control);
-
- skb = l2cap_create_sframe_pdu(sk, control_field);
- if (!IS_ERR(skb))
- l2cap_do_send(sk, skb);
-}
-
-static void l2cap_ertm_send_ack(struct sock *sk)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct bt_l2cap_control control;
- u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
+ struct l2cap_ctrl control;
+ u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
int threshold;
- BT_DBG("sk %p", sk);
- BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
- (int)pi->buffer_seq);
+ BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+ chan, chan->last_acked_seq, chan->buffer_seq);
memset(&control, 0, sizeof(control));
- control.frame_type = 's';
+ control.sframe = 1;
- if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
- pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
- l2cap_ertm_stop_ack_timer(pi);
- control.super = L2CAP_SFRAME_RNR;
- control.reqseq = pi->buffer_seq;
- l2cap_ertm_send_sframe(sk, &control);
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+ chan->rx_state == L2CAP_RX_STATE_RECV) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RNR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
} else {
- if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
- l2cap_ertm_send(sk);
+ if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
+ l2cap_ertm_send(chan);
/* If any i-frames were sent, they included an ack */
- if (pi->buffer_seq == pi->last_acked_seq)
+ if (chan->buffer_seq == chan->last_acked_seq)
frames_to_ack = 0;
}
/* Ack now if the window is 3/4ths full.
* Calculate without mul or div
*/
- threshold = pi->ack_win;
+ threshold = chan->ack_win;
threshold += threshold << 1;
threshold >>= 2;
- BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
- threshold);
+ BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
+ threshold);
if (frames_to_ack >= threshold) {
- l2cap_ertm_stop_ack_timer(pi);
- control.super = L2CAP_SFRAME_RR;
- control.reqseq = pi->buffer_seq;
- l2cap_ertm_send_sframe(sk, &control);
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
frames_to_ack = 0;
}
if (frames_to_ack)
- l2cap_ertm_start_ack_timer(pi);
+ __set_ack_timer(chan);
}
}
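The shift-and-add sequence above computes three quarters of the ack window without a multiply or divide. Restated on its own (illustrative helper, not part of the patch):

static u16 three_quarters(u16 ack_win)
{
	u16 threshold = ack_win;

	threshold += threshold << 1;	/* threshold = 3 * ack_win     */
	threshold >>= 2;		/* threshold = 3 * ack_win / 4 */

	return threshold;		/* e.g. ack_win 63 -> 47       */
}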
-static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
+static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
+ struct msghdr *msg, int len,
+ int count, struct sk_buff *skb)
{
- struct l2cap_pinfo *pi;
- struct bt_l2cap_control control;
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff **frag;
+ int sent = 0;
- BT_DBG("sk %p, poll %d", sk, (int) poll);
+ if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
+ return -EFAULT;
- pi = l2cap_pi(sk);
+ sent += count;
+ len -= count;
- memset(&control, 0, sizeof(control));
- control.frame_type = 's';
- control.poll = poll;
+ /* Continuation fragments (no L2CAP header) */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
- control.super = L2CAP_SFRAME_RNR;
+ count = min_t(unsigned int, conn->mtu, len);
+
+ tmp = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *frag = tmp;
+
+ if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+ return -EFAULT;
+
+ (*frag)->priority = skb->priority;
+
+ sent += count;
+ len -= count;
+
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
+ frag = &(*frag)->next;
+ }
+
+ return sent;
+}
+
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p len %zu priority %u", chan, len, priority);
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ skb->priority = priority;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
+ put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
+
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+ return skb;
+}
+
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p len %zu", chan, len);
+
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
+
+ skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ skb->priority = priority;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len);
+
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+ return skb;
+}
+
+static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u16 sdulen)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count, hlen;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p len %zu", chan, len);
+
+ if (!conn)
+ return ERR_PTR(-ENOTCONN);
+
+ hlen = __ertm_hdr_size(chan);
+
+ if (sdulen)
+ hlen += L2CAP_SDULEN_SIZE;
+
+ if (chan->fcs == L2CAP_FCS_CRC16)
+ hlen += L2CAP_FCS_SIZE;
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+
+ /* Control header is populated later */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
else
- control.super = L2CAP_SFRAME_RR;
+ put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
- control.reqseq = pi->buffer_seq;
- l2cap_ertm_send_sframe(sk, &control);
-}
+ if (sdulen)
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
-static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
- struct bt_l2cap_control control;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- memset(&control, 0, sizeof(control));
- control.frame_type = 's';
- control.final = 1;
- control.reqseq = pi->buffer_seq;
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- control.super = L2CAP_SFRAME_RNR;
- l2cap_ertm_send_sframe(sk, &control);
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
}
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->unacked_frames > 0))
- l2cap_ertm_start_retrans_timer(pi);
-
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
- /* Send pending iframes */
- l2cap_ertm_send(sk);
-
- if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
- /* F-bit wasn't sent in an s-frame or i-frame yet, so
- * send it now.
- */
- control.super = L2CAP_SFRAME_RR;
- l2cap_ertm_send_sframe(sk, &control);
- }
+ bt_cb(skb)->control.fcs = chan->fcs;
+ bt_cb(skb)->control.retries = 0;
+ return skb;
}
-static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
-{
- struct bt_l2cap_control control;
- struct l2cap_pinfo *pi;
- u16 seq;
-
- BT_DBG("sk %p, txseq %d", sk, (int)txseq);
-
- pi = l2cap_pi(sk);
- memset(&control, 0, sizeof(control));
- control.frame_type = 's';
- control.super = L2CAP_SFRAME_SREJ;
-
- for (seq = pi->expected_tx_seq; seq != txseq;
- seq = __next_seq(seq, pi)) {
- if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
- control.reqseq = seq;
- l2cap_ertm_send_sframe(sk, &control);
- l2cap_seq_list_append(&pi->srej_list, seq);
- }
- }
-
- pi->expected_tx_seq = __next_seq(txseq, pi);
-}
-
-static void l2cap_ertm_send_srej_tail(struct sock *sk)
-{
- struct bt_l2cap_control control;
- struct l2cap_pinfo *pi;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
- return;
-
- memset(&control, 0, sizeof(control));
- control.frame_type = 's';
- control.super = L2CAP_SFRAME_SREJ;
- control.reqseq = pi->srej_list.tail;
- l2cap_ertm_send_sframe(sk, &control);
-}
-
-static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
-{
- struct bt_l2cap_control control;
- struct l2cap_pinfo *pi;
- u16 initial_head;
- u16 seq;
-
- BT_DBG("sk %p, txseq %d", sk, (int) txseq);
-
- pi = l2cap_pi(sk);
- memset(&control, 0, sizeof(control));
- control.frame_type = 's';
- control.super = L2CAP_SFRAME_SREJ;
-
- /* Capture initial list head to allow only one pass through the list. */
- initial_head = pi->srej_list.head;
-
- do {
- seq = l2cap_seq_list_pop(&pi->srej_list);
- if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
- break;
-
- control.reqseq = seq;
- l2cap_ertm_send_sframe(sk, &control);
- l2cap_seq_list_append(&pi->srej_list, seq);
- } while (pi->srej_list.head != initial_head);
-}
-
-static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- BT_DBG("sk %p", sk);
-
- pi->expected_tx_seq = pi->buffer_seq;
- l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
- skb_queue_purge(SREJ_QUEUE(sk));
- pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
-}
-
-static int l2cap_ertm_tx_state_xmit(struct sock *sk,
- struct bt_l2cap_control *control,
- struct sk_buff_head *skbs, u8 event)
-{
- struct l2cap_pinfo *pi;
- int err = 0;
-
- BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
- (int)event);
- pi = l2cap_pi(sk);
-
- switch (event) {
- case L2CAP_ERTM_EVENT_DATA_REQUEST:
- if (sk->sk_send_head == NULL)
- sk->sk_send_head = skb_peek(skbs);
-
- skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
- l2cap_ertm_send(sk);
- break;
- case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
- BT_DBG("Enter LOCAL_BUSY");
- pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
-
- if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
- /* The SREJ_SENT state must be aborted if we are to
- * enter the LOCAL_BUSY state.
- */
- l2cap_ertm_abort_rx_srej_sent(sk);
- }
-
- l2cap_ertm_send_ack(sk);
-
- break;
- case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
- BT_DBG("Exit LOCAL_BUSY");
- pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-
- if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
- if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(pi->conn, pi,
- pi->scid,
- L2CAP_MOVE_CHAN_CONFIRMED);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- } else if (pi->amp_move_role ==
- L2CAP_AMP_MOVE_RESPONDER) {
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident,
- pi->dcid,
- L2CAP_MOVE_CHAN_SUCCESS);
- }
- break;
- }
-
- if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
- (pi->conn_state & L2CAP_CONN_SENT_RNR)) {
- struct bt_l2cap_control local_control;
-
- memset(&local_control, 0, sizeof(local_control));
- local_control.frame_type = 's';
- local_control.super = L2CAP_SFRAME_RR;
- local_control.poll = 1;
- local_control.reqseq = pi->buffer_seq;
- l2cap_ertm_send_sframe(sk, &local_control);
-
- pi->retry_count = 1;
- l2cap_ertm_start_monitor_timer(pi);
- pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
- }
- break;
- case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
- l2cap_ertm_process_reqseq(sk, control->reqseq);
- break;
- case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
- l2cap_ertm_send_rr_or_rnr(sk, 1);
- pi->retry_count = 1;
- l2cap_ertm_start_monitor_timer(pi);
- l2cap_ertm_stop_ack_timer(pi);
- pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
- break;
- case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
- l2cap_ertm_send_rr_or_rnr(sk, 1);
- pi->retry_count = 1;
- l2cap_ertm_start_monitor_timer(pi);
- pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
- break;
- case L2CAP_ERTM_EVENT_RECV_FBIT:
- /* Nothing to process */
- break;
- default:
- break;
- }
-
- return err;
-}
-
-static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
- struct bt_l2cap_control *control,
- struct sk_buff_head *skbs, u8 event)
-{
- struct l2cap_pinfo *pi;
- int err = 0;
-
- BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
- (int)event);
- pi = l2cap_pi(sk);
-
- switch (event) {
- case L2CAP_ERTM_EVENT_DATA_REQUEST:
- if (sk->sk_send_head == NULL)
- sk->sk_send_head = skb_peek(skbs);
- /* Queue data, but don't send. */
- skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
- break;
- case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
- BT_DBG("Enter LOCAL_BUSY");
- pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
-
- if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
- /* The SREJ_SENT state must be aborted if we are to
- * enter the LOCAL_BUSY state.
- */
- l2cap_ertm_abort_rx_srej_sent(sk);
- }
-
- l2cap_ertm_send_ack(sk);
-
- break;
- case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
- BT_DBG("Exit LOCAL_BUSY");
- pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-
- if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
- struct bt_l2cap_control local_control;
- memset(&local_control, 0, sizeof(local_control));
- local_control.frame_type = 's';
- local_control.super = L2CAP_SFRAME_RR;
- local_control.poll = 1;
- local_control.reqseq = pi->buffer_seq;
- l2cap_ertm_send_sframe(sk, &local_control);
-
- pi->retry_count = 1;
- l2cap_ertm_start_monitor_timer(pi);
- pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
- }
- break;
- case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
- l2cap_ertm_process_reqseq(sk, control->reqseq);
-
- /* Fall through */
-
- case L2CAP_ERTM_EVENT_RECV_FBIT:
- if (control && control->final) {
- l2cap_ertm_stop_monitor_timer(pi);
- if (pi->unacked_frames > 0)
- l2cap_ertm_start_retrans_timer(pi);
- pi->retry_count = 0;
- pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
- BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
- }
- break;
- case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
- /* Ignore */
- break;
- case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
- if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
- l2cap_ertm_send_rr_or_rnr(sk, 1);
- l2cap_ertm_start_monitor_timer(pi);
- pi->retry_count += 1;
- } else
- l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
- break;
- default:
- break;
- }
-
- return err;
-}
-
-int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
- struct sk_buff_head *skbs, u8 event)
-{
- struct l2cap_pinfo *pi;
- int err = 0;
-
- BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
- sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
-
- pi = l2cap_pi(sk);
-
- switch (pi->tx_state) {
- case L2CAP_ERTM_TX_STATE_XMIT:
- err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
- break;
- case L2CAP_ERTM_TX_STATE_WAIT_F:
- err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
- break;
- default:
- /* Ignore event */
- break;
- }
-
- return err;
-}
-
-int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
- struct msghdr *msg, size_t len, int reseg)
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
u16 sdu_len;
size_t pdu_len;
- int err = 0;
u8 sar;
- BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* It is critical that ERTM PDUs fit in a single HCI fragment,
* so fragmented skbs are not used. The HCI layer's handling
@@ -2412,17 +2391,20 @@
*/
/* PDU size is derived from the HCI MTU */
- pdu_len = l2cap_pi(sk)->conn->mtu;
+ pdu_len = chan->conn->mtu;
- /* Constrain BR/EDR PDU size to fit within the largest radio packet */
- if (!l2cap_pi(sk)->ampcon)
+ /* Constrain PDU size for BR/EDR connections */
+ if (!chan->hs_hcon)
pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
- pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;
+ if (chan->fcs)
+ pdu_len -= L2CAP_FCS_SIZE;
+
+ pdu_len -= __ertm_hdr_size(chan);
/* Remote device may have requested smaller PDUs */
- pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
if (len <= pdu_len) {
sar = L2CAP_SAR_UNSEGMENTED;
@@ -2434,10 +2416,8 @@
pdu_len -= L2CAP_SDULEN_SIZE;
}
- while (len) {
- skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);
-
- BT_DBG("iframe skb %p", skb);
+ while (len > 0) {
+ skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
if (IS_ERR(skb)) {
__skb_queue_purge(seg_queue);
@@ -2461,387 +2441,427 @@
}
}
- return err;
-}
-
-static inline int is_initial_frame(u8 sar)
-{
- return (sar == L2CAP_SAR_UNSEGMENTED ||
- sar == L2CAP_SAR_START);
-}
-
-static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
- size_t veclen)
-{
- struct sk_buff *frag_iter;
-
- BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
-
- if (iv->iov_len + skb->len > veclen)
- return -ENOMEM;
-
- memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
- iv->iov_len += skb->len;
-
- skb_walk_frags(skb, frag_iter) {
- if (iv->iov_len + skb->len > veclen)
- return -ENOMEM;
-
- BT_DBG("Copying %d bytes", (int)frag_iter->len);
- memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
- frag_iter->len);
- iv->iov_len += frag_iter->len;
- }
-
return 0;
}
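To make the sizing above concrete, here is one illustrative walk-through of the pdu_len computation for a BR/EDR ERTM channel with CRC16 FCS and enhanced (non-extended) control fields. The numeric header sizes below are the usual L2CAP constants and are stated here as assumptions, not taken from this patch:

/*
 * Illustrative numbers (assuming L2CAP_BREDR_MAX_PAYLOAD = 1019,
 * enhanced ERTM header = 6, L2CAP_FCS_SIZE = 2, L2CAP_SDULEN_SIZE = 2):
 *
 *   pdu_len  = conn->mtu;                      e.g. 1021
 *   pdu_len  = min(pdu_len, 1019);             BR/EDR cap        -> 1019
 *   pdu_len -= L2CAP_FCS_SIZE;                 CRC16 enabled     -> 1017
 *   pdu_len -= __ertm_hdr_size(chan);          enhanced control  -> 1011
 *   pdu_len  = min(pdu_len, chan->remote_mps); e.g. mps >= 1011  -> 1011
 *
 * A 2500 byte SDU then goes out as a SAR_START PDU carrying
 * 1011 - L2CAP_SDULEN_SIZE = 1009 bytes of the SDU, followed by
 * continuation/end PDUs for the remaining bytes.
 */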
-int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority)
{
- void *buf;
- int buflen;
- int err = 0;
struct sk_buff *skb;
- struct msghdr msg;
- struct kvec iv;
- struct sk_buff_head old_frames;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int err;
+ struct sk_buff_head seg_queue;
- BT_DBG("sk %p", sk);
+ /* Connectionless channel */
+ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
+ skb = l2cap_create_connless_pdu(chan, msg, len, priority);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
- if (skb_queue_empty(queue))
- return 0;
-
- memset(&msg, 0, sizeof(msg));
- msg.msg_iov = (struct iovec *) &iv;
-
- buflen = pi->omtu + L2CAP_FCS_SIZE;
- buf = kzalloc(buflen, GFP_TEMPORARY);
-
- if (!buf) {
- BT_DBG("Could not allocate resegmentation buffer");
- return -ENOMEM;
+ l2cap_do_send(chan, skb);
+ return len;
}
- /* Move current frames off the original queue */
- __skb_queue_head_init(&old_frames);
- skb_queue_splice_tail_init(queue, &old_frames);
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > chan->omtu)
+ return -EMSGSIZE;
- while (!skb_queue_empty(&old_frames)) {
- struct sk_buff_head current_sdu;
- u8 original_sar;
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(chan, msg, len, priority);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
- /* Reassemble each SDU from one or more PDUs */
+ l2cap_do_send(chan, skb);
+ err = len;
+ break;
- iv.iov_base = buf;
- iv.iov_len = 0;
-
- skb = skb_peek(&old_frames);
- original_sar = bt_cb(skb)->control.sar;
-
- __skb_unlink(skb, &old_frames);
-
- /* Append data to SDU */
- if (pi->extended_control)
- skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
- else
- skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);
-
- if (original_sar == L2CAP_SAR_START)
- skb_pull(skb, L2CAP_SDULEN_SIZE);
-
- err = l2cap_skbuff_to_kvec(skb, &iv, buflen);
-
- if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
- iv.iov_len -= L2CAP_FCS_SIZE;
-
- /* Free skb */
- kfree_skb(skb);
-
- if (err)
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ /* Check outgoing MTU */
+ if (len > chan->omtu) {
+ err = -EMSGSIZE;
break;
-
- while (!skb_queue_empty(&old_frames) && !err) {
- /* Check next frame */
- skb = skb_peek(&old_frames);
-
- if (is_initial_frame(bt_cb(skb)->control.sar))
- break;
-
- __skb_unlink(skb, &old_frames);
-
- /* Append data to SDU */
- if (pi->extended_control)
- skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
- else
- skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);
-
- if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
- skb_pull(skb, L2CAP_SDULEN_SIZE);
-
- err = l2cap_skbuff_to_kvec(skb, &iv, buflen);
-
- if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
- iv.iov_len -= L2CAP_FCS_SIZE;
-
- /* Free skb */
- kfree_skb(skb);
}
- if (err)
- break;
+ __skb_queue_head_init(&seg_queue);
- /* Segment data */
-
- __skb_queue_head_init(&current_sdu);
-
- /* skbs for the SDU were just freed, but the
- * resegmenting process could produce more, smaller
- * skbs due to smaller PDUs and reduced HCI MTU. The
- * overhead from the sk_buff structs could put us over
- * the sk_sndbuf limit.
- *
- * Since this code is running in response to a
- * received poll/final packet, it cannot block.
- * Therefore, memory allocation needs to be allowed by
- * falling back to bt_skb_alloc() (with
- * skb_set_owner_w() to maintain sk_wmem_alloc
- * correctly).
+ /* Do segmentation before calling into the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
*/
- msg.msg_iovlen = iv.iov_len;
- err = l2cap_segment_sdu(sk, &current_sdu, &msg,
- msg.msg_iovlen, 1);
+ err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- if (err || skb_queue_empty(&current_sdu)) {
- BT_DBG("Error %d resegmenting data for socket %p",
- err, sk);
- __skb_queue_purge(&current_sdu);
+ /* The channel could have been closed while segmenting,
+ * check that it is still connected.
+ */
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
+ }
+
+ if (err)
break;
- }
- /* Fix up first PDU SAR bits */
- if (!is_initial_frame(original_sar)) {
- BT_DBG("Changing SAR bits, %d PDUs",
- skb_queue_len(&current_sdu));
- skb = skb_peek(&current_sdu);
+ if (chan->mode == L2CAP_MODE_ERTM)
+ l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
+ else
+ l2cap_streaming_send(chan, &seg_queue);
- if (skb_queue_len(&current_sdu) == 1) {
- /* Change SAR from 'unsegmented' to 'end' */
- bt_cb(skb)->control.sar = L2CAP_SAR_END;
- } else {
- struct l2cap_hdr *lh;
- size_t hdrlen;
+ err = len;
- /* Change SAR from 'start' to 'continue' */
- bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
+ break;
- /* Start frames contain 2 bytes for
- * sdulen and continue frames don't.
- * Must rewrite header to eliminate
- * sdulen and then adjust l2cap frame
- * length.
- */
- if (pi->extended_control)
- hdrlen = L2CAP_EXTENDED_HDR_SIZE;
- else
- hdrlen = L2CAP_ENHANCED_HDR_SIZE;
-
- memmove(skb->data + L2CAP_SDULEN_SIZE,
- skb->data, hdrlen);
- skb_pull(skb, L2CAP_SDULEN_SIZE);
- lh = (struct l2cap_hdr *)skb->data;
- lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
- L2CAP_SDULEN_SIZE);
- }
- }
-
- /* Add to queue */
- skb_queue_splice_tail(&current_sdu, queue);
+ default:
+ BT_DBG("bad state %1.1x", chan->mode);
+ err = -EBADFD;
}
- __skb_queue_purge(&old_frames);
- if (err)
- __skb_queue_purge(queue);
-
- kfree(buf);
-
- BT_DBG("Queue resegmented, err=%d", err);
return err;
}
-static void l2cap_resegment_worker(struct work_struct *work)
+static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
- int err = 0;
- struct l2cap_resegment_work *seg_work =
- container_of(work, struct l2cap_resegment_work, work);
- struct sock *sk = seg_work->sk;
+ struct l2cap_ctrl control;
+ u16 seq;
- kfree(seg_work);
+ BT_DBG("chan %p, txseq %u", chan, txseq);
- BT_DBG("sk %p", sk);
- lock_sock(sk);
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
- if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
- release_sock(sk);
- sock_put(sk);
- return;
+ for (seq = chan->expected_tx_seq; seq != txseq;
+ seq = __next_seq(chan, seq)) {
+ if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ }
}
- err = l2cap_resegment_queue(sk, TX_QUEUE(sk));
-
- l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
-
- if (skb_queue_empty(TX_QUEUE(sk)))
- sk->sk_send_head = NULL;
- else
- sk->sk_send_head = skb_peek(TX_QUEUE(sk));
-
- if (err)
- l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
- else
- l2cap_ertm_send(sk);
-
- release_sock(sk);
- sock_put(sk);
+ chan->expected_tx_seq = __next_seq(chan, txseq);
}
-static int l2cap_setup_resegment(struct sock *sk)
+static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
- struct l2cap_resegment_work *seg_work;
+ struct l2cap_ctrl control;
- BT_DBG("sk %p", sk);
+ BT_DBG("chan %p", chan);
- if (skb_queue_empty(TX_QUEUE(sk)))
- return 0;
-
- seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
- if (!seg_work)
- return -ENOMEM;
-
- INIT_WORK(&seg_work->work, l2cap_resegment_worker);
- sock_hold(sk);
- seg_work->sk = sk;
-
- if (!queue_work(_l2cap_wq, &seg_work->work)) {
- kfree(seg_work);
- sock_put(sk);
- return -ENOMEM;
- }
-
- l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
-
- return 0;
-}
-
-static inline int l2cap_rmem_available(struct sock *sk)
-{
- BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
- atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
- return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
-}
-
-static inline int l2cap_rmem_full(struct sock *sk)
-{
- BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
- atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
- return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
-}
-
-void l2cap_amp_move_init(struct sock *sk)
-{
- BT_DBG("sk %p", sk);
-
- if (!l2cap_pi(sk)->conn)
+ if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
return;
- if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP) || !enable_hs)
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+ control.reqseq = chan->srej_list.tail;
+ l2cap_send_sframe(chan, &control);
+}
+
+static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 initial_head;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ /* Capture initial list head to allow only one pass through the list. */
+ initial_head = chan->srej_list.head;
+
+ do {
+ seq = l2cap_seq_list_pop(&chan->srej_list);
+ if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
+ break;
+
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ } while (chan->srej_list.head != initial_head);
+}
+
+static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
+{
+ struct sk_buff *acked_skb;
+ u16 ackseq;
+
+ BT_DBG("chan %p, reqseq %u", chan, reqseq);
+
+ if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
return;
- if (l2cap_pi(sk)->amp_id == 0) {
- if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
- return;
- l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
- l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
- amp_create_physical(l2cap_pi(sk)->conn, sk);
- } else {
- l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
- l2cap_pi(sk)->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
- l2cap_pi(sk)->amp_move_id = 0;
- l2cap_amp_move_setup(sk);
- l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
- l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ BT_DBG("expected_ack_seq %u, unacked_frames %u",
+ chan->expected_ack_seq, chan->unacked_frames);
+
+ for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
+ ackseq = __next_seq(chan, ackseq)) {
+
+ acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
+ if (acked_skb) {
+ skb_unlink(acked_skb, &chan->tx_q);
+ kfree_skb(acked_skb);
+ chan->unacked_frames--;
+ }
+ }
+
+ chan->expected_ack_seq = reqseq;
+
+ if (chan->unacked_frames == 0)
+ __clear_retrans_timer(chan);
+
+ BT_DBG("unacked_frames %u", chan->unacked_frames);
+}
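/*
 * Standalone sketch of the acknowledgment handling in
 * l2cap_process_reqseq() above (assumption: not part of this patch; the
 * array-based "tx queue" below is a simplified stand-in for chan->tx_q).
 * Every frame from expected_ack_seq up to, but not including, the peer's
 * ReqSeq is treated as acknowledged and dropped from the pending set,
 * and the retransmission timer is stopped once nothing is left unacked.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEQ_MODULO 64

struct fake_chan {
	unsigned expected_ack_seq;
	unsigned unacked_frames;
	bool pending[SEQ_MODULO];		/* stands in for chan->tx_q */
};

static void process_reqseq(struct fake_chan *c, unsigned reqseq)
{
	unsigned seq;

	for (seq = c->expected_ack_seq; seq != reqseq;
	     seq = (seq + 1) % SEQ_MODULO) {
		if (c->pending[seq]) {			/* frame was in flight */
			c->pending[seq] = false;	/* "kfree_skb()" */
			c->unacked_frames--;
		}
	}
	c->expected_ack_seq = reqseq;
	if (c->unacked_frames == 0)
		printf("all frames acked: clear retransmission timer\n");
}

int main(void)
{
	struct fake_chan c = {
		.expected_ack_seq = 10,
		.unacked_frames = 3,
		.pending = { [10] = true, [11] = true, [12] = true },
	};

	process_reqseq(&c, 13);		/* peer acknowledged up to seq 12 */
	return 0;
}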
+
+static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ chan->expected_tx_seq = chan->buffer_seq;
+ l2cap_seq_list_clear(&chan->srej_list);
+ skb_queue_purge(&chan->srej_q);
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+}
+
+static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ l2cap_ertm_send(chan);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ __clear_ack_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RETRANS_TO:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RECV_FBIT:
+ /* Nothing to process */
+ break;
+ default:
+ break;
}
}
-static void l2cap_chan_ready(struct sock *sk)
+static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
{
- struct sock *parent = bt_sk(sk)->parent;
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
- BT_DBG("sk %p, parent %p", sk, parent);
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+ /* Queue data, but don't send. */
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- l2cap_pi(sk)->conf_state = 0;
- l2cap_sock_clear_timer(sk);
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
- if (!parent) {
- /* Outgoing channel.
- * Wake up socket sleeping on connect.
- */
- sk->sk_state = BT_CONNECTED;
- sk->sk_state_change(sk);
- } else {
- /* Incoming channel.
- * Wake up socket sleeping on accept.
- */
- parent->sk_data_ready(parent, 0);
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ /* Fall through */
+
+ case L2CAP_EV_RECV_FBIT:
+ if (control && control->final) {
+ __clear_monitor_timer(chan);
+ if (chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ chan->retry_count = 0;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
+ }
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_EV_MONITOR_TO:
+ if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
+ l2cap_send_rr_or_rnr(chan, 1);
+ __set_monitor_timer(chan);
+ chan->retry_count++;
+ } else {
+ l2cap_send_disconn_req(chan, ECONNABORTED);
+ }
+ break;
+ default:
+ break;
}
}
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+ chan, control, skbs, event, chan->tx_state);
+
+ switch (chan->tx_state) {
+ case L2CAP_TX_STATE_XMIT:
+ l2cap_tx_state_xmit(chan, control, skbs, event);
+ break;
+ case L2CAP_TX_STATE_WAIT_F:
+ l2cap_tx_state_wait_f(chan, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+}
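/*
 * Minimal sketch of the event-driven TX state machine above (assumption:
 * not part of this patch; the enums and handlers are simplified stand-ins
 * for the kernel's L2CAP_TX_STATE_* and L2CAP_EV_* values). l2cap_tx()
 * only dispatches on tx_state, and each per-state handler decides what a
 * given event means: a retransmission timeout in XMIT polls the peer and
 * moves to WAIT_F, where new data is queued but not sent until the final
 * bit comes back.
 */
#include <stdio.h>

enum tx_state { TX_XMIT, TX_WAIT_F };
enum tx_event { EV_DATA_REQUEST, EV_RETRANS_TO, EV_RECV_FBIT };

static enum tx_state state_xmit(enum tx_event ev)
{
	switch (ev) {
	case EV_DATA_REQUEST:
		printf("XMIT: queue and send new I-frames\n");
		return TX_XMIT;
	case EV_RETRANS_TO:
		printf("XMIT: poll the peer and wait for the F-bit\n");
		return TX_WAIT_F;
	default:
		return TX_XMIT;
	}
}

static enum tx_state state_wait_f(enum tx_event ev)
{
	switch (ev) {
	case EV_DATA_REQUEST:
		printf("WAIT_F: queue data, but do not send yet\n");
		return TX_WAIT_F;
	case EV_RECV_FBIT:
		printf("WAIT_F: final bit seen, resume transmission\n");
		return TX_XMIT;
	default:
		return TX_WAIT_F;
	}
}

int main(void)
{
	enum tx_state st = TX_XMIT;

	st = state_xmit(EV_RETRANS_TO);		/* retransmission timer fired */
	st = state_wait_f(EV_DATA_REQUEST);	/* data arrives while polling */
	st = state_wait_f(EV_RECV_FBIT);	/* peer answered the poll */
	(void)st;
	return 0;
}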
+
+static void l2cap_pass_to_tx(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
+}
+
+static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
+}
+
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;
- struct sock *sk;
+ struct l2cap_chan *chan;
BT_DBG("conn %p", conn);
- read_lock(&l->lock);
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- if (sk->sk_type != SOCK_RAW)
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ struct sock *sk = chan->sk;
+ if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
/* Don't send frame to the socket it came from */
if (skb->sk == sk)
continue;
- nskb = skb_clone(skb, GFP_ATOMIC);
+ nskb = skb_clone(skb, GFP_KERNEL);
if (!nskb)
continue;
- if (sock_queue_rcv_skb(sk, nskb))
+ if (chan->ops->recv(chan, nskb))
kfree_skb(nskb);
}
- read_unlock(&l->lock);
+
+ mutex_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data)
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
+ u8 ident, u16 dlen, void *data)
{
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
int len, count;
- unsigned int mtu = conn->hcon->hdev->acl_mtu;
- BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
- conn, code, ident, dlen);
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+ conn, code, ident, dlen);
+
+ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+ return NULL;
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
- count = min_t(unsigned int, mtu, len);
+ count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_alloc(count, GFP_ATOMIC);
+ skb = bt_skb_alloc(count, GFP_KERNEL);
if (!skb)
return NULL;
@@ -2849,9 +2869,9 @@
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
if (conn->hcon->type == LE_LINK)
- lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
else
- lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
@@ -2869,9 +2889,9 @@
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
- count = min_t(unsigned int, mtu, len);
+ count = min_t(unsigned int, conn->mtu, len);
- *frag = bt_skb_alloc(count, GFP_ATOMIC);
+ *frag = bt_skb_alloc(count, GFP_KERNEL);
if (!*frag)
goto fail;
@@ -2890,7 +2910,8 @@
return NULL;
}
-static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
+static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ unsigned long *val)
{
struct l2cap_conf_opt *opt = *ptr;
int len;
@@ -2919,7 +2940,7 @@
break;
}
- BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
return len;
}
@@ -2927,7 +2948,7 @@
{
struct l2cap_conf_opt *opt = *ptr;
- BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
opt->type = type;
opt->len = len;
@@ -2953,158 +2974,100 @@
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
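/*
 * Standalone sketch of the type-length-value encoding used by
 * l2cap_add_conf_opt() above (assumption: not part of this patch; buffer
 * handling is simplified and the MTU value is copied in host byte order,
 * whereas the kernel emits it little-endian). Each option occupies
 * L2CAP_CONF_OPT_SIZE header bytes plus its value, and the writer
 * advances its pointer by that amount so options pack back to back.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CONF_OPT_HDR	2		/* 1 byte type + 1 byte length */
#define CONF_MTU	0x01

static size_t add_conf_opt(uint8_t *buf, uint8_t type, uint8_t len,
			   const void *val)
{
	buf[0] = type;
	buf[1] = len;
	memcpy(buf + CONF_OPT_HDR, val, len);
	return CONF_OPT_HDR + len;	/* caller advances by this much */
}

int main(void)
{
	uint8_t req[16];
	uint16_t mtu = 672;
	size_t used = add_conf_opt(req, CONF_MTU, sizeof(mtu), &mtu);

	printf("wrote %zu bytes: type 0x%02x len %u\n",
	       used, (unsigned)req[0], (unsigned)req[1]);
	return 0;
}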
-static void l2cap_ertm_ack_timeout(struct work_struct *work)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
- struct delayed_work *delayed =
- container_of(work, struct delayed_work, work);
- struct l2cap_pinfo *pi =
- container_of(delayed, struct l2cap_pinfo, ack_work);
- struct sock *sk = (struct sock *)pi;
+ struct l2cap_conf_efs efs;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_ERTM:
+ efs.id = chan->local_id;
+ efs.stype = chan->local_stype;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ efs.id = 1;
+ efs.stype = L2CAP_SERV_BESTEFFORT;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = 0;
+ efs.flush_to = 0;
+ break;
+
+ default:
+ return;
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+}
+
+static void l2cap_ack_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ ack_timer.work);
u16 frames_to_ack;
- BT_DBG("sk %p", sk);
+ BT_DBG("chan %p", chan);
- if (!sk)
- return;
+ l2cap_chan_lock(chan);
- lock_sock(sk);
-
- if (!l2cap_pi(sk)->conn) {
- release_sock(sk);
- return;
- }
-
- frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
- l2cap_pi(sk)->last_acked_seq,
- l2cap_pi(sk));
+ frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
if (frames_to_ack)
- l2cap_ertm_send_rr_or_rnr(sk, 0);
+ l2cap_send_rr_or_rnr(chan, 0);
- release_sock(sk);
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
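/*
 * Minimal sketch of the frames_to_ack computation in the ack timer above
 * (assumption: not part of this patch; seq_offset() is a simplified
 * stand-in for the kernel's __seq_offset()). The number of frames that
 * still need acknowledging is the modular distance from last_acked_seq
 * forward to buffer_seq, and an RR/RNR is only sent when it is non-zero.
 */
#include <stdio.h>

#define SEQ_MODULO 64

static unsigned seq_offset(unsigned seq1, unsigned seq2)
{
	/* distance from seq2 forward to seq1, wrapping at the modulus */
	return (seq1 + SEQ_MODULO - seq2) % SEQ_MODULO;
}

int main(void)
{
	unsigned buffer_seq = 3, last_acked_seq = 61;
	unsigned frames_to_ack = seq_offset(buffer_seq, last_acked_seq);

	if (frames_to_ack)
		printf("%u frames to ack: send RR\n", frames_to_ack);	/* 6 */
	return 0;
}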
-static void l2cap_ertm_retrans_timeout(struct work_struct *work)
+int l2cap_ertm_init(struct l2cap_chan *chan)
{
- struct delayed_work *delayed =
- container_of(work, struct delayed_work, work);
- struct l2cap_pinfo *pi =
- container_of(delayed, struct l2cap_pinfo, retrans_work);
- struct sock *sk = (struct sock *)pi;
+ int err;
- BT_DBG("sk %p", sk);
+ chan->next_tx_seq = 0;
+ chan->expected_tx_seq = 0;
+ chan->expected_ack_seq = 0;
+ chan->unacked_frames = 0;
+ chan->buffer_seq = 0;
+ chan->frames_sent = 0;
+ chan->last_acked_seq = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
- if (!sk)
- return;
+ skb_queue_head_init(&chan->tx_q);
- lock_sock(sk);
+ chan->local_amp_id = 0;
+ chan->move_id = 0;
+ chan->move_state = L2CAP_MOVE_STABLE;
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
- if (!l2cap_pi(sk)->conn) {
- release_sock(sk);
- return;
- }
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
- l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
- release_sock(sk);
-}
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
-static void l2cap_ertm_monitor_timeout(struct work_struct *work)
-{
- struct delayed_work *delayed =
- container_of(work, struct delayed_work, work);
- struct l2cap_pinfo *pi =
- container_of(delayed, struct l2cap_pinfo, monitor_work);
- struct sock *sk = (struct sock *)pi;
+ INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+ INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+ INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
- BT_DBG("sk %p", sk);
+ skb_queue_head_init(&chan->srej_q);
- if (!sk)
- return;
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
- lock_sock(sk);
+ err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ if (err < 0)
+ l2cap_seq_list_free(&chan->srej_list);
- if (!l2cap_pi(sk)->conn) {
- release_sock(sk);
- return;
- }
-
- l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
-
- release_sock(sk);
-}
-
-static inline void l2cap_ertm_init(struct sock *sk)
-{
- l2cap_pi(sk)->next_tx_seq = 0;
- l2cap_pi(sk)->expected_tx_seq = 0;
- l2cap_pi(sk)->expected_ack_seq = 0;
- l2cap_pi(sk)->unacked_frames = 0;
- l2cap_pi(sk)->buffer_seq = 0;
- l2cap_pi(sk)->frames_sent = 0;
- l2cap_pi(sk)->last_acked_seq = 0;
- l2cap_pi(sk)->sdu = NULL;
- l2cap_pi(sk)->sdu_last_frag = NULL;
- l2cap_pi(sk)->sdu_len = 0;
- atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
-
- l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
- l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
-
- BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
- l2cap_pi(sk)->rx_state);
-
- l2cap_pi(sk)->amp_id = 0;
- l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
- l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
- l2cap_pi(sk)->amp_move_reqseq = 0;
- l2cap_pi(sk)->amp_move_event = 0;
-
- INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
- INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
- l2cap_ertm_retrans_timeout);
- INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
- l2cap_ertm_monitor_timeout);
- INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
- skb_queue_head_init(SREJ_QUEUE(sk));
- skb_queue_head_init(TX_QUEUE(sk));
-
- l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
- l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
- l2cap_pi(sk)->remote_tx_win);
-}
-
-void l2cap_ertm_destruct(struct sock *sk)
-{
- l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
- l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
-}
-
-void l2cap_ertm_shutdown(struct sock *sk)
-{
- l2cap_ertm_stop_ack_timer(l2cap_pi(sk));
- l2cap_ertm_stop_retrans_timer(l2cap_pi(sk));
- l2cap_ertm_stop_monitor_timer(l2cap_pi(sk));
-}
-
-void l2cap_ertm_recv_done(struct sock *sk)
-{
- lock_sock(sk);
-
- if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM ||
- sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
- return;
- }
-
- /* Consume any queued incoming frames and update local busy status */
- if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
- l2cap_ertm_rx_queued_iframes(sk))
- l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
- else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
- l2cap_rmem_available(sk))
- l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
-
- release_sock(sk);
+ return err;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -3120,165 +3083,21 @@
}
}
-static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
+static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
- if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
- (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
- pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
- pi->extended_control = 1;
- } else {
- if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
- pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
-
- pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
- pi->extended_control = 0;
- }
- pi->ack_win = pi->tx_win;
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
-static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
- struct hci_ext_fs *new,
- struct hci_ext_fs *agg)
+static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
- *agg = *cur;
- if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
- /* current flow spec has known rate */
- if ((new->max_sdu == 0xFFFF) ||
- (new->sdu_arr_time == 0xFFFFFFFF)) {
- /* new fs has unknown rate, so aggregate is unknown */
- agg->max_sdu = 0xFFFF;
- agg->sdu_arr_time = 0xFFFFFFFF;
- } else {
- /* new fs has known rate, so aggregate is known */
- u64 cur_rate;
- u64 new_rate;
- cur_rate = cur->max_sdu * 1000000ULL;
- if (cur->sdu_arr_time)
- cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
- new_rate = new->max_sdu * 1000000ULL;
- if (new->sdu_arr_time)
- new_rate = div_u64(new_rate, new->sdu_arr_time);
- cur_rate = cur_rate + new_rate;
- if (cur_rate)
- agg->sdu_arr_time = div64_u64(
- agg->max_sdu * 1000000ULL, cur_rate);
- }
- }
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
-static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
+static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
+ struct l2cap_conf_rfc *rfc)
{
- struct hci_ext_fs tx_fs;
- struct hci_ext_fs rx_fs;
-
- BT_DBG("chan %p", chan);
-
- if (((chan->tx_fs.max_sdu == 0xFFFF) ||
- (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
- ((chan->rx_fs.max_sdu == 0xFFFF) ||
- (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
- return 0;
-
- l2cap_aggregate_fs(&chan->tx_fs,
- (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
- l2cap_aggregate_fs(&chan->rx_fs,
- (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
- hci_chan_modify(chan, &tx_fs, &rx_fs);
- return 1;
-}
-
-static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
- struct hci_ext_fs *old,
- struct hci_ext_fs *agg)
-{
- *agg = *cur;
- if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
- u64 cur_rate;
- u64 old_rate;
- cur_rate = cur->max_sdu * 1000000ULL;
- if (cur->sdu_arr_time)
- cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
- old_rate = old->max_sdu * 1000000ULL;
- if (old->sdu_arr_time)
- old_rate = div_u64(old_rate, old->sdu_arr_time);
- cur_rate = cur_rate - old_rate;
- if (cur_rate)
- agg->sdu_arr_time = div64_u64(
- agg->max_sdu * 1000000ULL, cur_rate);
- }
-}
-
-static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
-{
- struct hci_ext_fs tx_fs;
- struct hci_ext_fs rx_fs;
-
- BT_DBG("chan %p", chan);
-
- if (((chan->tx_fs.max_sdu == 0xFFFF) ||
- (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
- ((chan->rx_fs.max_sdu == 0xFFFF) ||
- (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
- return 0;
-
- l2cap_deaggregate_fs(&chan->tx_fs,
- (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
- l2cap_deaggregate_fs(&chan->rx_fs,
- (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
- hci_chan_modify(chan, &tx_fs, &rx_fs);
- return 1;
-}
-
-static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct sock *sk)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct hci_dev *hdev;
- struct hci_conn *hcon;
- struct hci_chan *chan;
-
- hdev = hci_dev_get(amp_id);
- if (!hdev)
- return NULL;
-
- BT_DBG("hdev %s", hdev->name);
-
- hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
- if (!hcon) {
- chan = NULL;
- goto done;
- }
-
- chan = hci_chan_list_lookup_id(hdev, hcon->handle);
- if (chan) {
- l2cap_aggregate(chan, pi);
- sock_hold(sk);
- chan->l2cap_sk = sk;
- hci_chan_hold(chan);
- pi->ampchan = chan;
- goto done;
- }
-
- chan = hci_chan_add(hdev);
- if (chan) {
- chan->conn = hcon;
- sock_hold(sk);
- chan->l2cap_sk = sk;
- hci_chan_hold(chan);
- pi->ampchan = chan;
- hci_chan_create(chan,
- (struct hci_ext_fs *) &pi->local_fs,
- (struct hci_ext_fs *) &pi->remote_fs);
- }
-done:
- hci_dev_put(hdev);
- return chan;
-}
-
-static void l2cap_get_ertm_timeouts(struct l2cap_conf_rfc *rfc,
- struct l2cap_pinfo *pi)
-{
- if (pi->amp_id && pi->ampcon) {
- u64 ertm_to = pi->ampcon->hdev->amp_be_flush_to;
+ if (chan->local_amp_id && chan->hs_hcon) {
+ u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
/* Class 1 devices must have ERTM timeouts
* exceeding the Link Supervision Timeout. The
@@ -3293,7 +3112,7 @@
*/
/* Convert timeout to milliseconds and round */
- ertm_to = div_u64(ertm_to + 999, 1000);
+ ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
/* This is the recommended formula for class 2 devices
* that start ERTM timers when packets are sent to the
@@ -3307,44 +3126,64 @@
rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
rfc->monitor_timeout = rfc->retrans_timeout;
} else {
- rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
}
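/*
 * Standalone sketch of the microsecond-to-millisecond round-up step in
 * __l2cap_set_ertm_timeouts() above (assumption: not part of this patch;
 * only the DIV_ROUND_UP_ULL() conversion is reproduced, the rest of the
 * AMP timeout derivation and the non-AMP defaults are as in the code
 * above). Rounding up ensures the derived ERTM timeout never undercuts
 * the controller's best-effort flush timeout.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;		/* same result as DIV_ROUND_UP_ULL */
}

int main(void)
{
	uint64_t flush_to_us = 10312;	/* hypothetical flush timeout in us */
	uint64_t flush_to_ms = div_round_up_u64(flush_to_us, 1000);

	printf("flush timeout: %" PRIu64 " us -> %" PRIu64 " ms\n",
	       flush_to_us, flush_to_ms);	/* 11 ms, not 10 */
	return 0;
}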
-int l2cap_build_conf_req(struct sock *sk, void *data)
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+ __l2cap_ews_supported(chan)) {
+ /* use extended control field */
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ } else {
+ chan->tx_win = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ }
+ chan->ack_win = chan->tx_win;
+}
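/*
 * Minimal sketch of the window selection in l2cap_txwin_setup() above
 * (assumption: not part of this patch; the constants mirror
 * L2CAP_DEFAULT_TX_WINDOW and L2CAP_DEFAULT_EXT_WINDOW, and the support
 * check stands in for __l2cap_ews_supported()). The extended control
 * field is only enabled when the requested window exceeds the 63-frame
 * enhanced window and both sides support extended window sizes;
 * otherwise the window is clamped.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_TX_WINDOW	63u
#define DEFAULT_EXT_WINDOW	0x3FFFu

int main(void)
{
	unsigned tx_win = 120;		/* hypothetical requested window */
	bool ews_supported = true;	/* both sides advertise EWS */

	if (tx_win > DEFAULT_TX_WINDOW && ews_supported)
		printf("use extended control, window max %u\n",
		       DEFAULT_EXT_WINDOW);
	else
		printf("clamp window to %u\n",
		       tx_win < DEFAULT_TX_WINDOW ? tx_win : DEFAULT_TX_WINDOW);
	return 0;
}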
+
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+{
struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+ struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ u16 size;
- BT_DBG("sk %p mode %d", sk, pi->mode);
+ BT_DBG("chan %p", chan);
- if (pi->num_conf_req || pi->num_conf_rsp)
+ if (chan->num_conf_req || chan->num_conf_rsp)
goto done;
- switch (pi->mode) {
+ switch (chan->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
- if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
break;
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
/* fall through */
default:
- pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
+ chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
break;
}
done:
- if (pi->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+ if (chan->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
- switch (pi->mode) {
+ switch (chan->mode) {
case L2CAP_MODE_BASIC:
- if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
- !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
+ if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+ !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
break;
+
+ rfc.mode = L2CAP_MODE_BASIC;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
@@ -3352,150 +3191,95 @@
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc);
break;
case L2CAP_MODE_ERTM:
- l2cap_setup_txwin(pi);
- if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
- rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
- else
- rfc.txwin_size = pi->tx_win;
- rfc.max_transmit = pi->max_tx;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- l2cap_get_ertm_timeouts(&rfc, pi);
+ rfc.mode = L2CAP_MODE_ERTM;
+ rfc.max_transmit = chan->max_tx;
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
- rfc.max_pdu_size = cpu_to_le16(pi->imtu);
+ __l2cap_set_ertm_timeouts(chan, &rfc);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_txwin_setup(chan);
+
+ rfc.txwin_size = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc);
- if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
- pi->extended_control) {
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
- pi->tx_win);
- }
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
- if (pi->amp_id) {
- /* default best effort extended flow spec */
- struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
- sizeof(fs), (unsigned long) &fs);
- }
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
- if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
- break;
-
- if (pi->fcs == L2CAP_FCS_NONE ||
- pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
- pi->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
- }
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+ chan->fcs);
+ }
break;
case L2CAP_MODE_STREAMING:
- l2cap_setup_txwin(pi);
+ l2cap_txwin_setup(chan);
+ rfc.mode = L2CAP_MODE_STREAMING;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
- rfc.max_pdu_size = cpu_to_le16(pi->imtu);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc);
- if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
- pi->extended_control) {
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
- }
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
- if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
- break;
-
- if (pi->fcs == L2CAP_FCS_NONE ||
- pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
- pi->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
- }
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+ chan->fcs);
+ }
break;
}
- req->dcid = cpu_to_le16(pi->dcid);
- req->flags = cpu_to_le16(0);
+ req->dcid = cpu_to_le16(chan->dcid);
+ req->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
-
-static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = pi->mode };
- void *ptr = req->data;
-
- BT_DBG("sk %p", sk);
-
- switch (pi->mode) {
- case L2CAP_MODE_ERTM:
- rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = pi->tx_win;
- rfc.max_transmit = pi->max_tx;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- l2cap_get_ertm_timeouts(&rfc, pi);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
- rfc.max_pdu_size = cpu_to_le16(pi->imtu);
-
- break;
-
- default:
- return -ECONNREFUSED;
- }
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
-
- if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {
- /* TODO assign fcs for br/edr based on socket config option */
- /* FCS is not used with AMP because it is redundant - lower
- * layers already include a checksum. */
- if (pi->amp_id)
- pi->local_conf.fcs = L2CAP_FCS_NONE;
- else
- pi->local_conf.fcs = L2CAP_FCS_CRC16;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->local_conf.fcs);
- pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
- }
-
- req->dcid = cpu_to_le16(pi->dcid);
- req->flags = cpu_to_le16(0);
-
- return ptr - data;
-}
-
-static int l2cap_parse_conf_req(struct sock *sk, void *data)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- void *req = pi->conf_req;
- int len = pi->conf_len;
+ void *req = chan->conf_req;
+ int len = chan->conf_len;
int type, hint, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
- struct l2cap_conf_ext_fs fs;
+ struct l2cap_conf_efs efs;
+ u8 remote_efs = 0;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
+ u16 size;
- BT_DBG("sk %p", sk);
-
- if (pi->omtu > mtu)
- mtu = pi->omtu;
+ BT_DBG("chan %p", chan);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -3509,16 +3293,10 @@
break;
case L2CAP_CONF_FLUSH_TO:
- pi->flush_to = val;
- if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
- result = L2CAP_CONF_UNACCEPT;
- else
- pi->remote_conf.flush_to = val;
+ chan->flush_to = val;
break;
case L2CAP_CONF_QOS:
- if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
- result = L2CAP_CONF_UNACCEPT;
break;
case L2CAP_CONF_RFC:
@@ -3528,42 +3306,23 @@
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
- pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
- pi->remote_conf.fcs = val;
+ set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
break;
- case L2CAP_CONF_EXT_FS:
- if (olen == sizeof(fs)) {
- pi->conf_state |= L2CAP_CONF_EFS_RECV;
- if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
- result = L2CAP_CONF_UNACCEPT;
- break;
- }
- memcpy(&fs, (void *) val, olen);
- if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
- result = L2CAP_CONF_FLOW_SPEC_REJECT;
- break;
- }
- pi->remote_conf.flush_to =
- le32_to_cpu(fs.flush_to);
- pi->remote_fs.id = fs.id;
- pi->remote_fs.type = fs.type;
- pi->remote_fs.max_sdu =
- le16_to_cpu(fs.max_sdu);
- pi->remote_fs.sdu_arr_time =
- le32_to_cpu(fs.sdu_arr_time);
- pi->remote_fs.acc_latency =
- le32_to_cpu(fs.acc_latency);
- pi->remote_fs.flush_to =
- le32_to_cpu(fs.flush_to);
- }
+ case L2CAP_CONF_EFS:
+ remote_efs = 1;
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *) val, olen);
break;
- case L2CAP_CONF_EXT_WINDOW:
- pi->extended_control = 1;
- pi->remote_tx_win = val;
- pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
- pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
+ case L2CAP_CONF_EWS:
+ if (!enable_hs)
+ return -ECONNREFUSED;
+
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ set_bit(CONF_EWS_RECV, &chan->conf_state);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ chan->remote_tx_win = val;
break;
default:
@@ -3576,87 +3335,129 @@
}
}
- if (pi->num_conf_rsp || pi->num_conf_req > 1)
+ if (chan->num_conf_rsp || chan->num_conf_req > 1)
goto done;
- switch (pi->mode) {
+ switch (chan->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
- if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
- pi->mode = l2cap_select_mode(rfc.mode,
- pi->conn->feat_mask);
+ if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
+ chan->mode = l2cap_select_mode(rfc.mode,
+ chan->conn->feat_mask);
break;
}
- if (pi->mode != rfc.mode)
+ if (remote_efs) {
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+ else
+ return -ECONNREFUSED;
+ }
+
+ if (chan->mode != rfc.mode)
return -ECONNREFUSED;
break;
}
done:
- if (pi->mode != rfc.mode) {
+ if (chan->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
- rfc.mode = pi->mode;
- if (mtu > L2CAP_DEFAULT_MTU)
- pi->omtu = mtu;
+ rfc.mode = chan->mode;
- if (pi->num_conf_rsp == 1)
+ if (chan->num_conf_rsp == 1)
return -ECONNREFUSED;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
}
-
- if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
- !(pi->conf_state & L2CAP_CONF_EFS_RECV))
- return -ECONNREFUSED;
-
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
- if (mtu < L2CAP_DEFAULT_MIN_MTU) {
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
result = L2CAP_CONF_UNACCEPT;
- pi->omtu = L2CAP_DEFAULT_MIN_MTU;
- } else {
- pi->omtu = mtu;
- pi->conf_state |= L2CAP_CONF_MTU_DONE;
+ else {
+ chan->omtu = mtu;
+ set_bit(CONF_MTU_DONE, &chan->conf_state);
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype) {
+
+ result = L2CAP_CONF_UNACCEPT;
+
+ if (chan->num_conf_req >= 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+ set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ }
+ }
switch (rfc.mode) {
case L2CAP_MODE_BASIC:
- pi->fcs = L2CAP_FCS_NONE;
- pi->conf_state |= L2CAP_CONF_MODE_DONE;
+ chan->fcs = L2CAP_FCS_NONE;
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
break;
case L2CAP_MODE_ERTM:
- if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
- pi->remote_tx_win = rfc.txwin_size;
- pi->remote_max_tx = rfc.max_transmit;
- pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
- l2cap_get_ertm_timeouts(&rfc, pi);
+ if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+ chan->remote_tx_win = rfc.txwin_size;
+ else
+ rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
- pi->conf_state |= L2CAP_CONF_MODE_DONE;
+ chan->remote_max_tx = rfc.max_transmit;
+
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
+
+ __l2cap_set_ertm_timeouts(chan, &rfc);
+
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc);
- if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
- sizeof(fs), (unsigned long) &fs);
-
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+ chan->remote_stype = efs.stype;
+ chan->remote_msdu = le16_to_cpu(efs.msdu);
+ chan->remote_flush_to =
+ le32_to_cpu(efs.flush_to);
+ chan->remote_acc_lat =
+ le32_to_cpu(efs.acc_lat);
+ chan->remote_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ }
break;
case L2CAP_MODE_STREAMING:
- pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
- pi->conf_state |= L2CAP_CONF_MODE_DONE;
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
break;
@@ -3664,183 +3465,30 @@
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
- rfc.mode = pi->mode;
- }
-
- if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
- !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
- pi->conf_state |= L2CAP_CONF_PEND_SENT;
- result = L2CAP_CONF_PENDING;
-
- if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
- pi->amp_id) {
- struct hci_chan *chan;
- /* Trigger logical link creation only on AMP */
-
- chan = l2cap_chan_admit(pi->amp_id, sk);
- if (!chan)
- return -ECONNREFUSED;
-
- if (chan->state == BT_CONNECTED)
- l2cap_create_cfm(chan, 0);
- }
+ rfc.mode = chan->mode;
}
if (result == L2CAP_CONF_SUCCESS)
- pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
}
- rsp->scid = cpu_to_le16(pi->dcid);
+ rsp->scid = cpu_to_le16(chan->dcid);
rsp->result = cpu_to_le16(result);
- rsp->flags = cpu_to_le16(0x0000);
+ rsp->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
-static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
+static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ void *data, u16 *result)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_conf_rsp *rsp = data;
- void *ptr = rsp->data;
- void *req = pi->conf_req;
- int len = pi->conf_len;
- int type, hint, olen;
- unsigned long val;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
- struct l2cap_conf_ext_fs fs;
- u16 mtu = pi->omtu;
- u16 tx_win = pi->remote_tx_win;
- u16 result = L2CAP_CONF_SUCCESS;
-
- BT_DBG("sk %p", sk);
-
- while (len >= L2CAP_CONF_OPT_SIZE) {
- len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
-
- hint = type & L2CAP_CONF_HINT;
- type &= L2CAP_CONF_MASK;
-
- switch (type) {
- case L2CAP_CONF_MTU:
- mtu = val;
- break;
-
- case L2CAP_CONF_FLUSH_TO:
- if (pi->amp_move_id)
- result = L2CAP_CONF_UNACCEPT;
- else
- pi->remote_conf.flush_to = val;
- break;
-
- case L2CAP_CONF_QOS:
- if (pi->amp_move_id)
- result = L2CAP_CONF_UNACCEPT;
- break;
-
- case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *) val, olen);
- break;
-
- case L2CAP_CONF_FCS:
- pi->remote_conf.fcs = val;
- break;
-
- case L2CAP_CONF_EXT_FS:
- if (olen == sizeof(fs)) {
- memcpy(&fs, (void *) val, olen);
- if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
- result = L2CAP_CONF_FLOW_SPEC_REJECT;
- else {
- pi->remote_conf.flush_to =
- le32_to_cpu(fs.flush_to);
- }
- }
- break;
-
- case L2CAP_CONF_EXT_WINDOW:
- tx_win = val;
- break;
-
- default:
- if (hint)
- break;
-
- result = L2CAP_CONF_UNKNOWN;
- *((u8 *) ptr++) = type;
- break;
- }
- }
-
- BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
- result, pi->mode, rfc.mode);
-
- if (pi->mode != rfc.mode || rfc.mode == L2CAP_MODE_BASIC)
- result = L2CAP_CONF_UNACCEPT;
-
- if (result == L2CAP_CONF_SUCCESS) {
- /* Configure output options and let the other side know
- * which ones we don't like. */
-
- /* Don't allow mtu to decrease. */
- if (mtu < pi->omtu)
- result = L2CAP_CONF_UNACCEPT;
-
- BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
-
- /* Don't allow extended transmit window to change. */
- if (tx_win != pi->remote_tx_win) {
- result = L2CAP_CONF_UNACCEPT;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
- pi->remote_tx_win);
- }
-
- pi->remote_mps = rfc.max_pdu_size;
-
- if (rfc.mode == L2CAP_MODE_ERTM) {
- l2cap_get_ertm_timeouts(&rfc, pi);
- } else {
- rfc.retrans_timeout = 0;
- rfc.monitor_timeout = 0;
- }
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
- }
-
- if (result != L2CAP_CONF_SUCCESS)
- goto done;
-
- pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs;
-
- if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG)
- pi->flush_to = pi->remote_conf.flush_to;
-
-done:
- rsp->scid = cpu_to_le16(pi->dcid);
- rsp->result = cpu_to_le16(result);
- rsp->flags = cpu_to_le16(0x0000);
-
- return ptr - data;
-}
-
-static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
- BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
-
- /* Initialize rfc in case no rfc option is received */
- rfc.mode = pi->mode;
- rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -3849,103 +3497,159 @@
case L2CAP_CONF_MTU:
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
- pi->imtu = L2CAP_DEFAULT_MIN_MTU;
+ chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
- pi->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+ chan->imtu = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
break;
case L2CAP_CONF_FLUSH_TO:
- pi->flush_to = val;
+ chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, pi->flush_to);
+ 2, chan->flush_to);
break;
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
- if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
- rfc.mode != pi->mode)
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+ rfc.mode != chan->mode)
return -ECONNREFUSED;
- pi->fcs = 0;
+ chan->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc);
break;
- case L2CAP_CONF_EXT_WINDOW:
- pi->ack_win = min_t(u16, val, pi->ack_win);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
- 2, pi->tx_win);
+ case L2CAP_CONF_EWS:
+ chan->ack_win = min_t(u16, val, chan->ack_win);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
break;
- default:
+ case L2CAP_CONF_EFS:
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *)val, olen);
+
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+ break;
+
+ case L2CAP_CONF_FCS:
+ if (*result == L2CAP_CONF_PENDING)
+ if (val == L2CAP_FCS_NONE)
+ set_bit(CONF_RECV_NO_FCS,
+ &chan->conf_state);
break;
}
}
- if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
+ if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
return -ECONNREFUSED;
- pi->mode = rfc.mode;
+ chan->mode = rfc.mode;
- if (*result == L2CAP_CONF_SUCCESS) {
+ if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- if (!pi->extended_control) {
- pi->ack_win = min_t(u16, pi->ack_win,
- rfc.txwin_size);
+ chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->local_msdu = le16_to_cpu(efs.msdu);
+ chan->local_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+ chan->local_flush_to =
+ le32_to_cpu(efs.flush_to);
}
break;
+
case L2CAP_MODE_STREAMING:
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
- req->dcid = cpu_to_le16(pi->dcid);
- req->flags = cpu_to_le16(0x0000);
+ req->dcid = cpu_to_le16(chan->dcid);
+ req->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
-static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
+static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
+ u16 result, u16 flags)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- BT_DBG("sk %p", sk);
+ BT_DBG("chan %p", chan);
- rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp->scid = cpu_to_le16(chan->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(flags);
return ptr - data;
}
-static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_conn *conn = chan->conn;
+ u8 buf[128];
+ u8 rsp_code;
+
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+
+ if (chan->hs_hcon)
+ rsp_code = L2CAP_CREATE_CHAN_RSP;
+ else
+ rsp_code = L2CAP_CONN_RSP;
+
+ BT_DBG("chan %p rsp_code %u", chan, rsp_code);
+
+ l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
+
+ if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
+ return;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+}
+
+static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
+{
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
- u16 txwin_ext = pi->ack_win;
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC or extended window size option.
+ */
+ u16 txwin_ext = chan->ack_win;
+ struct l2cap_conf_rfc rfc = {
+ .mode = chan->mode,
+ .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
+ .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
+ .max_pdu_size = cpu_to_le16(chan->imtu),
+ .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
+ };
- BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+ BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
- /* Initialize rfc in case no rfc option is received */
- rfc.mode = pi->mode;
- rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- rfc.txwin_size = min_t(u16, pi->ack_win, L2CAP_DEFAULT_TX_WINDOW);
-
- if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
+ if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
return;
while (len >= L2CAP_CONF_OPT_SIZE) {
@@ -3956,7 +3660,7 @@
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
break;
- case L2CAP_CONF_EXT_WINDOW:
+ case L2CAP_CONF_EWS:
txwin_ext = val;
break;
}
@@ -3964,146 +3668,35 @@
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- if (pi->extended_control)
- pi->ack_win = min_t(u16, pi->ack_win, txwin_ext);
+ chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
else
- pi->ack_win = min_t(u16, pi->ack_win, rfc.txwin_size);
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
break;
case L2CAP_MODE_STREAMING:
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
-static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
+static inline int l2cap_command_rej(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- int type, olen;
- unsigned long val;
- struct l2cap_conf_ext_fs fs;
+ struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
- BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+ if (cmd_len < sizeof(*rej))
+ return -EPROTO;
- while (len >= L2CAP_CONF_OPT_SIZE) {
- len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
- if ((type == L2CAP_CONF_EXT_FS) &&
- (olen == sizeof(struct l2cap_conf_ext_fs))) {
- memcpy(&fs, (void *)val, olen);
- pi->local_fs.id = fs.id;
- pi->local_fs.type = fs.type;
- pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
- pi->local_fs.sdu_arr_time =
- le32_to_cpu(fs.sdu_arr_time);
- pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
- pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
- break;
- }
- }
-
-}
-
-static int l2cap_finish_amp_move(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
- int err;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
- pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
-
- if (pi->ampcon)
- pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
- else
- pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
-
- err = l2cap_setup_resegment(sk);
-
- return err;
-}
-
-static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
- u16 result)
-{
- int err = 0;
- struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);
-
- if (pi->reconf_state == L2CAP_RECONF_NONE)
- return -ECONNREFUSED;
-
- if (result == L2CAP_CONF_SUCCESS) {
- while (len >= L2CAP_CONF_OPT_SIZE) {
- int type, olen;
- unsigned long val;
-
- len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
-
- if (type == L2CAP_CONF_RFC) {
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
-
- if (rfc.mode != pi->mode) {
- l2cap_send_disconn_req(pi->conn, sk,
- ECONNRESET);
- return -ECONNRESET;
- }
-
- goto done;
- }
- }
- }
-
- BT_ERR("Expected RFC option was missing, using existing values");
-
- rfc.mode = pi->mode;
- rfc.retrans_timeout = cpu_to_le16(pi->retrans_timeout);
- rfc.monitor_timeout = cpu_to_le16(pi->monitor_timeout);
-
-done:
- l2cap_ertm_stop_ack_timer(pi);
- l2cap_ertm_stop_retrans_timer(pi);
- l2cap_ertm_stop_monitor_timer(pi);
-
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- if (pi->mode == L2CAP_MODE_ERTM) {
- pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- }
-
- if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
- l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;
-
- /* Respond to poll */
- err = l2cap_answer_move_poll(sk);
- } else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {
- if (pi->mode == L2CAP_MODE_ERTM) {
- l2cap_ertm_tx(sk, NULL, NULL,
- L2CAP_ERTM_EVENT_EXPLICIT_POLL);
- pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
- }
- }
-
- return err;
-}
-
-
-static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
-
- if (rej->reason != 0x0000)
+ if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
return 0;
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
- cmd->ident == conn->info_ident) {
- del_timer(&conn->info_timer);
+ cmd->ident == conn->info_ident) {
+ cancel_delayed_work(&conn->info_timer);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -4114,115 +3707,104 @@
return 0;
}
-static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u8 *data, u8 rsp_code,
- u8 amp_id)
+static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u8 *data, u8 rsp_code, u8 amp_id)
{
- struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
+ struct l2cap_chan *chan = NULL, *pchan;
struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
- BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
/* Check if we have socket listening on psm */
- parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
- if (!parent) {
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
+ if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
}
- bh_lock_sock(parent);
+ parent = pchan->sk;
+
+ mutex_lock(&conn->chan_lock);
+ lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
- if (psm != cpu_to_le16(0x0001) &&
- !hci_conn_check_link_mode(conn->hcon)) {
- conn->disc_reason = 0x05;
+ if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
+ !hci_conn_check_link_mode(conn->hcon)) {
+ conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
goto response;
}
result = L2CAP_CR_NO_MEM;
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
- goto response;
- }
-
- sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
- if (!sk)
- goto response;
-
- write_lock_bh(&list->lock);
-
/* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(list, scid)) {
- write_unlock_bh(&list->lock);
- sock_set_flag(sk, SOCK_ZAPPED);
- l2cap_sock_kill(sk);
- sk = NULL;
+ if (__l2cap_get_chan_by_dcid(conn, scid))
goto response;
- }
+
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan)
+ goto response;
+
+ sk = chan->sk;
hci_conn_hold(conn->hcon);
- l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
- l2cap_pi(sk)->psm = psm;
- l2cap_pi(sk)->dcid = scid;
+ chan->psm = psm;
+ chan->dcid = scid;
+ chan->local_amp_id = amp_id;
- bt_accept_enqueue(parent, sk);
+ __l2cap_chan_add(conn, chan);
- __l2cap_chan_add(conn, sk);
- dcid = l2cap_pi(sk)->scid;
- l2cap_pi(sk)->amp_id = amp_id;
+ dcid = chan->scid;
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+ __set_chan_timer(chan, sk->sk_sndtimeo);
- l2cap_pi(sk)->ident = cmd->ident;
+ chan->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
- if (l2cap_check_security(sk)) {
- if (bt_sk(sk)->defer_setup) {
- sk->sk_state = BT_CONNECT2;
+ if (l2cap_chan_check_security(chan)) {
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
- parent->sk_data_ready(parent, 0);
+ chan->ops->defer(chan);
} else {
/* Force pending result for AMP controllers.
* The connection will succeed after the
- * physical link is up. */
+ * physical link is up.
+ */
if (amp_id) {
- sk->sk_state = BT_CONNECT2;
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
} else {
- sk->sk_state = BT_CONFIG;
+ __l2cap_state_change(chan, BT_CONFIG);
result = L2CAP_CR_SUCCESS;
}
status = L2CAP_CS_NO_INFO;
}
} else {
- sk->sk_state = BT_CONNECT2;
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
}
} else {
- sk->sk_state = BT_CONNECT2;
+ __l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
}
- write_unlock_bh(&list->lock);
-
response:
- bh_unlock_sock(parent);
+ release_sock(parent);
+ mutex_unlock(&conn->chan_lock);
sendresp:
rsp.scid = cpu_to_le16(scid);
@@ -4231,455 +3813,477 @@
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
- if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
+ if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
- info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- mod_timer(&conn->info_timer, jiffies +
- msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
- l2cap_send_cmd(conn, conn->info_ident,
- L2CAP_INFO_REQ, sizeof(info), &info);
+ l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+ sizeof(info), &info);
}
- if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
- result == L2CAP_CR_SUCCESS) {
+ if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ result == L2CAP_CR_SUCCESS) {
u8 buf[128];
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
}
- return sk;
+ return chan;
}
-static inline int l2cap_connect_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+static int l2cap_connect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
- l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+
+ if (cmd_len < sizeof(struct l2cap_conn_req))
+ return -EPROTO;
+
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+ mgmt_device_connected(hdev, &hcon->dst, hcon->type,
+ hcon->dst_type, 0, NULL, 0,
+ hcon->dev_class);
+ hci_dev_unlock(hdev);
+
+ l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
return 0;
}
-static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
- struct sock *sk;
+ struct l2cap_chan *chan;
u8 req[128];
+ int err;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
- BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ dcid, scid, result, status);
+
+ mutex_lock(&conn->chan_lock);
if (scid) {
- sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
- if (!sk)
- return -EFAULT;
+ chan = __l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+ err = -EFAULT;
+ goto unlock;
+ }
} else {
- sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
- if (!sk)
- return -EFAULT;
+ chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+ if (!chan) {
+ err = -EFAULT;
+ goto unlock;
+ }
}
+ err = 0;
+
+ l2cap_chan_lock(chan);
+
switch (result) {
case L2CAP_CR_SUCCESS:
- sk->sk_state = BT_CONFIG;
- l2cap_pi(sk)->ident = 0;
- l2cap_pi(sk)->dcid = dcid;
- l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
+ l2cap_state_change(chan, BT_CONFIG);
+ chan->ident = 0;
+ chan->dcid = dcid;
+ clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
+ if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
break;
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, req), req);
- l2cap_pi(sk)->num_conf_req++;
+ l2cap_build_conf_req(chan, req), req);
+ chan->num_conf_req++;
break;
case L2CAP_CR_PEND:
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+ set_bit(CONF_CONNECT_PEND, &chan->conf_state);
break;
default:
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 5);
- break;
- }
-
- l2cap_chan_del(sk, ECONNREFUSED);
+ l2cap_chan_del(chan, ECONNREFUSED);
break;
}
- bh_unlock_sock(sk);
- return 0;
+ l2cap_chan_unlock(chan);
+
+unlock:
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
}
-static inline void set_default_fcs(struct l2cap_pinfo *pi)
+static inline void set_default_fcs(struct l2cap_chan *chan)
{
/* FCS is enabled only in ERTM or streaming mode, if one or both
* sides request it.
*/
- if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
- pi->fcs = L2CAP_FCS_NONE;
- else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
- pi->fcs = L2CAP_FCS_CRC16;
+ if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
+ chan->fcs = L2CAP_FCS_NONE;
+ else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
+ chan->fcs = L2CAP_FCS_CRC16;
}
-static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
+ u8 ident, u16 flags)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
+ flags);
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, data,
+ L2CAP_CONF_SUCCESS, flags), data);
+}
+
+static inline int l2cap_config_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
u16 dcid, flags;
- u8 rspbuf[64];
- struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
- struct sock *sk;
- int len;
- u8 amp_move_reconf = 0;
+ u8 rsp[64];
+ struct l2cap_chan *chan;
+ int len, err = 0;
+
+ if (cmd_len < sizeof(*req))
+ return -EPROTO;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
- sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
- if (!sk)
+ chan = l2cap_get_chan_by_scid(conn, dcid);
+ if (!chan)
return -ENOENT;
- BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
- "reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
- sk->sk_state, l2cap_pi(sk)->rx_state,
- l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
- l2cap_pi(sk)->amp_move_id);
+ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
+ struct l2cap_cmd_rej_cid rej;
- /* Detect a reconfig request due to channel move between
- * BR/EDR and AMP
- */
- if (sk->sk_state == BT_CONNECTED &&
- l2cap_pi(sk)->rx_state ==
- L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
- l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
+ rej.scid = cpu_to_le16(chan->scid);
+ rej.dcid = cpu_to_le16(chan->dcid);
- if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
- amp_move_reconf = 1;
-
- if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
- struct l2cap_cmd_rej rej;
-
- rej.reason = cpu_to_le16(0x0002);
l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
- sizeof(rej), &rej);
+ sizeof(rej), &rej);
goto unlock;
}
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
+ if (chan->conf_len + len > sizeof(chan->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(sk, rspbuf,
- L2CAP_CONF_REJECT, flags), rspbuf);
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_REJECT, flags), rsp);
goto unlock;
}
/* Store config. */
- memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
- l2cap_pi(sk)->conf_len += len;
+ memcpy(chan->conf_req + chan->conf_len, req->data, len);
+ chan->conf_len += len;
- if (flags & 0x0001) {
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(sk, rspbuf,
- L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_SUCCESS, flags), rsp);
goto unlock;
}
/* Complete config. */
- if (!amp_move_reconf)
- len = l2cap_parse_conf_req(sk, rspbuf);
- else
- len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);
-
+ len = l2cap_parse_conf_req(chan, rsp);
if (len < 0) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ l2cap_send_disconn_req(chan, ECONNRESET);
goto unlock;
}
- l2cap_pi(sk)->conf_ident = cmd->ident;
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
- rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
- !l2cap_pi(sk)->amp_id) {
- /* Send success response right after pending if using
- * lockstep config on BR/EDR
- */
- rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
- }
+ chan->ident = cmd->ident;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
+ chan->num_conf_rsp++;
/* Reset config buffer. */
- l2cap_pi(sk)->conf_len = 0;
+ chan->conf_len = 0;
- if (amp_move_reconf)
+ if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
goto unlock;
- l2cap_pi(sk)->num_conf_rsp++;
+ if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
+ set_default_fcs(chan);
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
- goto unlock;
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- set_default_fcs(l2cap_pi(sk));
+ if (err < 0)
+ l2cap_send_disconn_req(chan, -err);
+ else
+ l2cap_chan_ready(chan);
- sk->sk_state = BT_CONNECTED;
-
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
- l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
- l2cap_ertm_init(sk);
-
- l2cap_chan_ready(sk);
goto unlock;
}
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
+ if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
u8 buf[64];
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+ }
+
+ /* Got Conf Rsp PENDING from remote side and assume we sent
+ Conf Rsp PENDING in the code above */
+ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+ test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+ /* check compatibility */
+
+ /* Send rsp for BR/EDR channel */
+ if (!chan->hs_hcon)
+ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
+ else
+ chan->ident = cmd->ident;
}
unlock:
- bh_unlock_sock(sk);
- return 0;
+ l2cap_chan_unlock(chan);
+ return err;
}
-static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
- struct sock *sk;
- struct l2cap_pinfo *pi;
- int len = cmd->len - sizeof(*rsp);
+ struct l2cap_chan *chan;
+ int len = cmd_len - sizeof(*rsp);
+ int err = 0;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
- BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
- scid, flags, result);
+ BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
+ result, len);
- sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
- if (!sk)
+ chan = l2cap_get_chan_by_scid(conn, scid);
+ if (!chan)
return 0;
- pi = l2cap_pi(sk);
-
- if (pi->reconf_state != L2CAP_RECONF_NONE) {
- l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
- goto done;
- }
-
switch (result) {
case L2CAP_CONF_SUCCESS:
- if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
- !(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
- /* Lockstep procedure requires a pending response
- * before success.
- */
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
- goto done;
- }
-
- l2cap_conf_rfc_get(sk, rsp->data, len);
+ l2cap_conf_rfc_get(chan, rsp->data, len);
+ clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
break;
case L2CAP_CONF_PENDING:
- if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
- goto done;
- }
+ set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
- l2cap_conf_rfc_get(sk, rsp->data, len);
+ if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+ char buf[64];
- pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;
-
- l2cap_conf_ext_fs_get(sk, rsp->data, len);
-
- if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
- struct hci_chan *chan;
-
- /* Already sent a 'pending' response, so set up
- * the logical link now
- */
- chan = l2cap_chan_admit(pi->amp_id, sk);
- if (!chan) {
- l2cap_send_disconn_req(pi->conn, sk,
- ECONNRESET);
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ buf, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
}
- if (chan->state == BT_CONNECTED)
- l2cap_create_cfm(chan, 0);
+ if (!chan->hs_hcon) {
+ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
+ 0);
+ } else {
+ if (l2cap_check_efs(chan)) {
+ amp_create_logical_link(chan);
+ chan->ident = cmd->ident;
+ }
+ }
}
-
goto done;
case L2CAP_CONF_UNACCEPT:
- if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+ if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64];
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
}
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
- len = l2cap_parse_conf_rsp(sk, rsp->data,
- len, req, &result);
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ req, &result);
if (len < 0) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
}
l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_CONF_REQ, len, req);
- pi->num_conf_req++;
+ L2CAP_CONF_REQ, len, req);
+ chan->num_conf_req++;
if (result != L2CAP_CONF_SUCCESS)
goto done;
break;
}
default:
- sk->sk_err = ECONNRESET;
- l2cap_sock_set_timer(sk, HZ * 5);
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ l2cap_chan_set_err(chan, ECONNRESET);
+
+ __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
+ l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
}
- if (flags & 0x01)
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION)
goto done;
- pi->conf_state |= L2CAP_CONF_INPUT_DONE;
+ set_bit(CONF_INPUT_DONE, &chan->conf_state);
- if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
- set_default_fcs(pi);
+ if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
+ set_default_fcs(chan);
- sk->sk_state = BT_CONNECTED;
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
- if (pi->mode == L2CAP_MODE_ERTM ||
- pi->mode == L2CAP_MODE_STREAMING)
- l2cap_ertm_init(sk);
-
- l2cap_chan_ready(sk);
+ if (err < 0)
+ l2cap_send_disconn_req(chan, -err);
+ else
+ l2cap_chan_ready(chan);
}
done:
- bh_unlock_sock(sk);
- return 0;
+ l2cap_chan_unlock(chan);
+ return err;
}
-static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
u16 dcid, scid;
+ struct l2cap_chan *chan;
struct sock *sk;
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
scid = __le16_to_cpu(req->scid);
dcid = __le16_to_cpu(req->dcid);
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
- sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
- if (!sk)
- return 0;
+ mutex_lock(&conn->chan_lock);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ chan = __l2cap_get_chan_by_scid(conn, dcid);
+ if (!chan) {
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
+ l2cap_chan_lock(chan);
+
+ sk = chan->sk;
+
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.scid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
- /* Only do cleanup if a disconnect request was not sent already */
- if (sk->sk_state != BT_DISCONN) {
- sk->sk_shutdown = SHUTDOWN_MASK;
+ lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ release_sock(sk);
- sk->sk_send_head = NULL;
- skb_queue_purge(TX_QUEUE(sk));
+ l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, ECONNRESET);
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
- skb_queue_purge(SREJ_QUEUE(sk));
+ l2cap_chan_unlock(chan);
- __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
- __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
- __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
- }
- }
+ chan->ops->close(chan);
+ l2cap_chan_put(chan);
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- return 0;
- }
+ mutex_unlock(&conn->chan_lock);
- l2cap_chan_del(sk, ECONNRESET);
-
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
return 0;
}
-static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
u16 dcid, scid;
- struct sock *sk;
+ struct l2cap_chan *chan;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
- sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
- if (!sk)
- return 0;
+ mutex_lock(&conn->chan_lock);
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
+ chan = __l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+ mutex_unlock(&conn->chan_lock);
return 0;
}
- l2cap_chan_del(sk, 0);
- bh_unlock_sock(sk);
+ l2cap_chan_lock(chan);
- l2cap_sock_kill(sk);
+ l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, 0);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+
return 0;
}
-static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_information_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
u16 type;
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
@@ -4688,42 +4292,53 @@
u8 buf[8];
u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
- | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
+ | L2CAP_FEAT_FCS;
+ if (enable_hs)
+ feat_mask |= L2CAP_FEAT_EXT_FLOW
+ | L2CAP_FEAT_EXT_WINDOW;
+
put_unaligned_le32(feat_mask, rsp->data);
- l2cap_send_cmd(conn, cmd->ident,
- L2CAP_INFO_RSP, sizeof(buf), buf);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+ buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
u8 buf[12];
- u8 fc_mask = l2cap_fc_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+
if (enable_hs)
- fc_mask |= L2CAP_FC_A2MP;
- memset(rsp->data, 0, 8);
- rsp->data[0] = fc_mask;
- l2cap_send_cmd(conn, cmd->ident,
- L2CAP_INFO_RSP, sizeof(buf), buf);
+ l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+ else
+ l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
+ rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
+ memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+ buf);
} else {
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
- rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
- l2cap_send_cmd(conn, cmd->ident,
- L2CAP_INFO_RSP, sizeof(rsp), &rsp);
+ rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
+ &rsp);
}
return 0;
}
-static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
u16 type, result;
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
type = __le16_to_cpu(rsp->type);
result = __le16_to_cpu(rsp->result);
@@ -4731,10 +4346,10 @@
/* L2CAP Info req/rsp are unbound to channels, add extra checks */
if (cmd->ident != conn->info_ident ||
- conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+ conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
return 0;
- del_timer(&conn->info_timer);
+ cancel_delayed_work(&conn->info_timer);
if (result != L2CAP_IR_SUCCESS) {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -4745,964 +4360,780 @@
return 0;
}
- if (type == L2CAP_IT_FEAT_MASK) {
+ switch (type) {
+ case L2CAP_IT_FEAT_MASK:
conn->feat_mask = get_unaligned_le32(rsp->data);
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
conn->info_ident = l2cap_get_ident(conn);
l2cap_send_cmd(conn, conn->info_ident,
- L2CAP_INFO_REQ, sizeof(req), &req);
+ L2CAP_INFO_REQ, sizeof(req), &req);
} else {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
}
- } else if (type == L2CAP_IT_FIXED_CHAN) {
- conn->fc_mask = rsp->data[0];
+ break;
+
+ case L2CAP_IT_FIXED_CHAN:
+ conn->fixed_chan_mask = rsp->data[0];
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
+ break;
}
return 0;
}
-static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
- struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
+static int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_create_chan_req *req = data;
+ struct l2cap_create_chan_rsp rsp;
+ struct l2cap_chan *chan;
+ struct hci_dev *hdev;
+ u16 psm, scid;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
+
+ /* For controller id 0 make BR/EDR connection */
+ if (req->amp_id == HCI_BREDR_ID) {
+ l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+ req->amp_id);
+ return 0;
+ }
+
+ /* Validate AMP controller id */
+ hdev = hci_dev_get(req->amp_id);
+ if (!hdev)
+ goto error;
+
+ if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
+ hci_dev_put(hdev);
+ goto error;
+ }
+
+ chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+ req->amp_id);
+ if (chan) {
+ struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ struct hci_conn *hs_hcon;
+
+ hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
+ if (!hs_hcon) {
+ hci_dev_put(hdev);
+ return -EFAULT;
+ }
+
+ BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
+
+ mgr->bredr_chan = chan;
+ chan->hs_hcon = hs_hcon;
+ chan->fcs = L2CAP_FCS_NONE;
+ conn->mtu = hdev->block_mtu;
+ }
+
+ hci_dev_put(hdev);
+
+ return 0;
+
+error:
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ return -EFAULT;
+}
+
+static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
struct l2cap_move_chan_req req;
u8 ident;
- BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
- (int) dest_amp_id);
+ BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
- ident = l2cap_get_ident(conn);
- if (pi)
- pi->ident = ident;
+ ident = l2cap_get_ident(chan->conn);
+ chan->ident = ident;
- req.icid = cpu_to_le16(icid);
+ req.icid = cpu_to_le16(chan->scid);
req.dest_amp_id = dest_amp_id;
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
+ l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
+ &req);
+
+ __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
-static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid, u16 result)
+static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
struct l2cap_move_chan_rsp rsp;
- BT_DBG("icid %d, result %d", (int) icid, (int) result);
+ BT_DBG("chan %p, result 0x%4.4x", chan, result);
- rsp.icid = cpu_to_le16(icid);
+ rsp.icid = cpu_to_le16(chan->dcid);
rsp.result = cpu_to_le16(result);
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
+ sizeof(rsp), &rsp);
}
-static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
- struct l2cap_pinfo *pi, u16 icid, u16 result)
+static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
struct l2cap_move_chan_cfm cfm;
- u8 ident;
- BT_DBG("icid %d, result %d", (int) icid, (int) result);
+ BT_DBG("chan %p, result 0x%4.4x", chan, result);
- ident = l2cap_get_ident(conn);
- if (pi)
- pi->ident = ident;
+ chan->ident = l2cap_get_ident(chan->conn);
- cfm.icid = cpu_to_le16(icid);
+ cfm.icid = cpu_to_le16(chan->scid);
cfm.result = cpu_to_le16(result);
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
+ sizeof(cfm), &cfm);
+
+ __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
+}
+
+static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
+{
+ struct l2cap_move_chan_cfm cfm;
+
+ BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
+
+ cfm.icid = cpu_to_le16(icid);
+ cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
+ sizeof(cfm), &cfm);
}
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
+ u16 icid)
{
struct l2cap_move_chan_cfm_rsp rsp;
- BT_DBG("icid %d", (int) icid);
+ BT_DBG("icid 0x%4.4x", icid);
rsp.icid = cpu_to_le16(icid);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
-static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+static void __release_logical_link(struct l2cap_chan *chan)
{
- struct l2cap_create_chan_req *req =
- (struct l2cap_create_chan_req *) data;
- struct sock *sk;
- u16 psm, scid;
+ chan->hs_hchan = NULL;
+ chan->hs_hcon = NULL;
- psm = le16_to_cpu(req->psm);
- scid = le16_to_cpu(req->scid);
-
- BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
- (int) req->amp_id);
-
- if (req->amp_id) {
- struct hci_dev *hdev;
-
- /* Validate AMP controller id */
- hdev = hci_dev_get(req->amp_id);
- if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
- struct l2cap_create_chan_rsp rsp;
-
- rsp.dcid = 0;
- rsp.scid = cpu_to_le16(scid);
- rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
- rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- if (hdev)
- hci_dev_put(hdev);
-
- return 0;
- }
-
- hci_dev_put(hdev);
- }
-
- sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
-
- if (sk)
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
-
- if (sk && req->amp_id &&
- (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
- amp_accept_physical(conn, req->amp_id, sk);
-
- return 0;
+ /* Placeholder - release the logical link */
}
-static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+static void l2cap_logical_fail(struct l2cap_chan *chan)
{
- BT_DBG("conn %p", conn);
+ /* Logical link setup failed */
+ if (chan->state != BT_CONNECTED) {
+ /* Create channel failure, disconnect */
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return;
+ }
- return l2cap_connect_rsp(conn, cmd, data);
+ switch (chan->move_role) {
+ case L2CAP_MOVE_ROLE_RESPONDER:
+ l2cap_move_done(chan);
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
+ break;
+ case L2CAP_MOVE_ROLE_INITIATOR:
+ if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
+ chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
+ /* Remote has only sent pending or
+ * success responses, clean up
+ */
+ l2cap_move_done(chan);
+ }
+
+ /* Other amp move states imply that the move
+ * has already aborted
+ */
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+ break;
+ }
+}
+
+static void l2cap_logical_finish_create(struct l2cap_chan *chan,
+ struct hci_chan *hchan)
+{
+ struct l2cap_conf_rsp rsp;
+
+ chan->hs_hchan = hchan;
+ chan->hs_hcon->l2cap_data = chan->conn;
+
+ l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
+
+ if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
+ int err;
+
+ set_default_fcs(chan);
+
+ err = l2cap_ertm_init(chan);
+ if (err < 0)
+ l2cap_send_disconn_req(chan, -err);
+ else
+ l2cap_chan_ready(chan);
+ }
+}
+
+static void l2cap_logical_finish_move(struct l2cap_chan *chan,
+ struct hci_chan *hchan)
+{
+ chan->hs_hcon = hchan->conn;
+ chan->hs_hcon->l2cap_data = chan->conn;
+
+ BT_DBG("move_state %d", chan->move_state);
+
+ switch (chan->move_state) {
+ case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+ /* Move confirm will be sent after a success
+ * response is received
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+ break;
+ case L2CAP_MOVE_WAIT_LOGICAL_CFM:
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+ } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+ } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+ }
+ break;
+ default:
+ /* Move was not in expected state, free the channel */
+ __release_logical_link(chan);
+
+ chan->move_state = L2CAP_MOVE_STABLE;
+ }
+}
+
+/* Call with chan locked */
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+ u8 status)
+{
+ BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
+
+ if (status) {
+ l2cap_logical_fail(chan);
+ __release_logical_link(chan);
+ return;
+ }
+
+ if (chan->state != BT_CONNECTED) {
+ /* Ignore logical link if channel is on BR/EDR */
+ if (chan->local_amp_id)
+ l2cap_logical_finish_create(chan, hchan);
+ } else {
+ l2cap_logical_finish_move(chan, hchan);
+ }
+}
+
+void l2cap_move_start(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ if (chan->local_amp_id == HCI_BREDR_ID) {
+ if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
+ return;
+ chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+ chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+ /* Placeholder - start physical link setup */
+ } else {
+ chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+ chan->move_id = 0;
+ l2cap_move_setup(chan);
+ l2cap_send_move_chan_req(chan, 0);
+ }
+}
+
+static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ u8 local_amp_id, u8 remote_amp_id)
+{
+ BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
+ local_amp_id, remote_amp_id);
+
+ chan->fcs = L2CAP_FCS_NONE;
+
+ /* Outgoing channel on AMP */
+ if (chan->state == BT_CONNECT) {
+ if (result == L2CAP_CR_SUCCESS) {
+ chan->local_amp_id = local_amp_id;
+ l2cap_send_create_chan_req(chan, remote_amp_id);
+ } else {
+ /* Revert to BR/EDR connect */
+ l2cap_send_conn_req(chan);
+ }
+
+ return;
+ }
+
+ /* Incoming channel on AMP */
+ if (__l2cap_no_conn_pending(chan)) {
+ struct l2cap_conn_rsp rsp;
+ char buf[128];
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+
+ if (result == L2CAP_CR_SUCCESS) {
+ /* Send successful response */
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+ } else {
+ /* Send negative response */
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+ }
+
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (result == L2CAP_CR_SUCCESS) {
+ __l2cap_state_change(chan, BT_CONFIG);
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+ }
+ }
+}
+
+static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
+ u8 remote_amp_id)
+{
+ l2cap_move_setup(chan);
+ chan->move_id = local_amp_id;
+ chan->move_state = L2CAP_MOVE_WAIT_RSP;
+
+ l2cap_send_move_chan_req(chan, remote_amp_id);
+}
+
+static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
+{
+ struct hci_chan *hchan = NULL;
+
+ /* Placeholder - get hci_chan for logical link */
+
+ if (hchan) {
+ if (hchan->state == BT_CONNECTED) {
+ /* Logical link is ready to go */
+ chan->hs_hcon = hchan->conn;
+ chan->hs_hcon->l2cap_data = chan->conn;
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+
+ l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+ } else {
+ /* Wait for logical link to be ready */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+ }
+ } else {
+ /* Logical link not available */
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
+ }
+}
+
+static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
+{
+ if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+ u8 rsp_result;
+ if (result == -EINVAL)
+ rsp_result = L2CAP_MR_BAD_ID;
+ else
+ rsp_result = L2CAP_MR_NOT_ALLOWED;
+
+ l2cap_send_move_chan_rsp(chan, rsp_result);
+ }
+
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
+ chan->move_state = L2CAP_MOVE_STABLE;
+
+ /* Restart data transmission */
+ l2cap_ertm_send(chan);
+}
+
+/* Invoke with locked chan */
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
+{
+ u8 local_amp_id = chan->local_amp_id;
+ u8 remote_amp_id = chan->remote_amp_id;
+
+ BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
+ chan, result, local_amp_id, remote_amp_id);
+
+ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
+ l2cap_chan_unlock(chan);
+ return;
+ }
+
+ if (chan->state != BT_CONNECTED) {
+ l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
+ } else if (result != L2CAP_MR_SUCCESS) {
+ l2cap_do_move_cancel(chan, result);
+ } else {
+ switch (chan->move_role) {
+ case L2CAP_MOVE_ROLE_INITIATOR:
+ l2cap_do_move_initiate(chan, local_amp_id,
+ remote_amp_id);
+ break;
+ case L2CAP_MOVE_ROLE_RESPONDER:
+ l2cap_do_move_respond(chan, result);
+ break;
+ default:
+ l2cap_do_move_cancel(chan, result);
+ break;
+ }
+ }
}
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
- struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
- struct sock *sk;
- struct l2cap_pinfo *pi;
+ struct l2cap_move_chan_req *req = data;
+ struct l2cap_move_chan_rsp rsp;
+ struct l2cap_chan *chan;
u16 icid = 0;
- u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
+ u16 result = L2CAP_MR_NOT_ALLOWED;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
icid = le16_to_cpu(req->icid);
- BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);
+ BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
- read_lock(&conn->chan_list.lock);
- sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
- read_unlock(&conn->chan_list.lock);
+ if (!enable_hs)
+ return -EINVAL;
- if (!sk)
- goto send_move_response;
+ chan = l2cap_get_chan_by_dcid(conn, icid);
+ if (!chan) {
+ rsp.icid = cpu_to_le16(icid);
+ rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+ return 0;
+ }
- lock_sock(sk);
- pi = l2cap_pi(sk);
+ chan->ident = cmd->ident;
- if (pi->scid < L2CAP_CID_DYN_START ||
- (pi->mode != L2CAP_MODE_ERTM &&
- pi->mode != L2CAP_MODE_STREAMING)) {
+ if (chan->scid < L2CAP_CID_DYN_START ||
+ chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
+ (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING)) {
+ result = L2CAP_MR_NOT_ALLOWED;
goto send_move_response;
}
- if (pi->amp_id == req->dest_amp_id) {
- result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
+ if (chan->local_amp_id == req->dest_amp_id) {
+ result = L2CAP_MR_SAME_ID;
goto send_move_response;
}
if (req->dest_amp_id) {
struct hci_dev *hdev;
hdev = hci_dev_get(req->dest_amp_id);
- if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
+ if (!hdev || hdev->dev_type != HCI_AMP ||
+ !test_bit(HCI_UP, &hdev->flags)) {
if (hdev)
hci_dev_put(hdev);
- result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
+ result = L2CAP_MR_BAD_ID;
goto send_move_response;
}
hci_dev_put(hdev);
}
- if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
- pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
- pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
- bacmp(conn->src, conn->dst) > 0) {
- result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
+ /* Detect a move collision. Only send a collision response
+ * if this side has "lost", otherwise proceed with the move.
+ * The winner has the larger bd_addr.
+ */
+ if ((__chan_is_moving(chan) ||
+ chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
+ bacmp(conn->src, conn->dst) > 0) {
+ result = L2CAP_MR_COLLISION;
goto send_move_response;
}
- if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
- result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
- goto send_move_response;
- }
+ chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+ l2cap_move_setup(chan);
+ chan->move_id = req->dest_amp_id;
+ icid = chan->dcid;
- pi->amp_move_cmd_ident = cmd->ident;
- pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
- l2cap_amp_move_setup(sk);
- pi->amp_move_id = req->dest_amp_id;
- icid = pi->dcid;
-
- if (req->dest_amp_id == 0) {
+ if (!req->dest_amp_id) {
/* Moving to BR/EDR */
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
- result = L2CAP_MOVE_CHAN_PENDING;
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+ result = L2CAP_MR_PEND;
} else {
- pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
- result = L2CAP_MOVE_CHAN_SUCCESS;
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+ result = L2CAP_MR_SUCCESS;
}
} else {
- pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
- amp_accept_physical(pi->conn, req->dest_amp_id, sk);
- result = L2CAP_MOVE_CHAN_PENDING;
+ chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+ /* Placeholder - uncomment when amp functions are available */
+ /*amp_accept_physical(chan, req->dest_amp_id);*/
+ result = L2CAP_MR_PEND;
}
send_move_response:
- l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+ l2cap_send_move_chan_rsp(chan, result);
- if (sk)
- release_sock(sk);
+ l2cap_chan_unlock(chan);
return 0;
}
-static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
- struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
- struct sock *sk;
- struct l2cap_pinfo *pi;
+ struct l2cap_chan *chan;
+ struct hci_chan *hchan = NULL;
+
+ chan = l2cap_get_chan_by_scid(conn, icid);
+ if (!chan) {
+ l2cap_send_move_chan_cfm_icid(conn, icid);
+ return;
+ }
+
+ __clear_chan_timer(chan);
+ if (result == L2CAP_MR_PEND)
+ __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
+
+ switch (chan->move_state) {
+ case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+ /* Move confirm will be sent when logical link
+ * is complete.
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+ break;
+ case L2CAP_MOVE_WAIT_RSP_SUCCESS:
+ if (result == L2CAP_MR_PEND) {
+ break;
+ } else if (test_bit(CONN_LOCAL_BUSY,
+ &chan->conn_state)) {
+ chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+ } else {
+ /* Logical link is up or moving to BR/EDR,
+ * proceed with move
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+ }
+ break;
+ case L2CAP_MOVE_WAIT_RSP:
+ /* Moving to AMP */
+ if (result == L2CAP_MR_SUCCESS) {
+ /* Remote is ready, send confirm immediately
+ * after logical link is ready
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+ } else {
+ /* Both logical link and move success
+ * are required to confirm
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
+ }
+
+ /* Placeholder - get hci_chan for logical link */
+ if (!hchan) {
+ /* Logical link not available */
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+ break;
+ }
+
+ /* If the logical link is not yet connected, do not
+ * send confirmation.
+ */
+ if (hchan->state != BT_CONNECTED)
+ break;
+
+ /* Logical link is already ready to go */
+
+ chan->hs_hcon = hchan->conn;
+ chan->hs_hcon->l2cap_data = chan->conn;
+
+ if (result == L2CAP_MR_SUCCESS) {
+ /* Can confirm now */
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+ } else {
+ /* Now only need move success
+ * to confirm
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+ }
+
+ l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+ break;
+ default:
+ /* Any other amp move state means the move failed. */
+ chan->move_id = chan->local_amp_id;
+ l2cap_move_done(chan);
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+ }
+
+ l2cap_chan_unlock(chan);
+}
+
+static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+ u16 result)
+{
+ struct l2cap_chan *chan;
+
+ chan = l2cap_get_chan_by_ident(conn, ident);
+ if (!chan) {
+ /* Could not locate channel, icid is best guess */
+ l2cap_send_move_chan_cfm_icid(conn, icid);
+ return;
+ }
+
+ __clear_chan_timer(chan);
+
+ if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+ if (result == L2CAP_MR_COLLISION) {
+ chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+ } else {
+ /* Cleanup - cancel move */
+ chan->move_id = chan->local_amp_id;
+ l2cap_move_done(chan);
+ }
+ }
+
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+
+ l2cap_chan_unlock(chan);
+}
+
+static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_rsp *rsp = data;
u16 icid, result;
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
icid = le16_to_cpu(rsp->icid);
result = le16_to_cpu(rsp->result);
- BT_DBG("icid %d, result %d", (int) icid, (int) result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
- switch (result) {
- case L2CAP_MOVE_CHAN_SUCCESS:
- case L2CAP_MOVE_CHAN_PENDING:
- read_lock(&conn->chan_list.lock);
- sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
- read_unlock(&conn->chan_list.lock);
-
- if (!sk) {
- l2cap_send_move_chan_cfm(conn, NULL, icid,
- L2CAP_MOVE_CHAN_UNCONFIRMED);
- break;
- }
-
- lock_sock(sk);
- pi = l2cap_pi(sk);
-
- l2cap_sock_clear_timer(sk);
- if (result == L2CAP_MOVE_CHAN_PENDING)
- l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);
-
- if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
- /* Move confirm will be sent when logical link
- * is complete.
- */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
- } else if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
- if (result == L2CAP_MOVE_CHAN_PENDING) {
- break;
- } else if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
- } else {
- /* Logical link is up or moving to BR/EDR,
- * proceed with move */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(conn, pi, pi->scid,
- L2CAP_MOVE_CHAN_CONFIRMED);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- }
- } else if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
- struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- struct hci_chan *chan;
- /* Moving to AMP */
- if (result == L2CAP_MOVE_CHAN_SUCCESS) {
- /* Remote is ready, send confirm immediately
- * after logical link is ready
- */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
- } else {
- /* Both logical link and move success
- * are required to confirm
- */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
- }
- pi->remote_fs = default_fs;
- pi->local_fs = default_fs;
- chan = l2cap_chan_admit(pi->amp_move_id, sk);
- if (!chan) {
- /* Logical link not available */
- l2cap_send_move_chan_cfm(conn, pi, pi->scid,
- L2CAP_MOVE_CHAN_UNCONFIRMED);
- break;
- }
-
- if (chan->state == BT_CONNECTED) {
- /* Logical link is already ready to go */
- pi->ampcon = chan->conn;
- pi->ampcon->l2cap_data = pi->conn;
- if (result == L2CAP_MOVE_CHAN_SUCCESS) {
- /* Can confirm now */
- l2cap_send_move_chan_cfm(conn, pi,
- pi->scid,
- L2CAP_MOVE_CHAN_CONFIRMED);
- } else {
- /* Now only need move success
- * required to confirm
- */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
- }
-
- l2cap_create_cfm(chan, 0);
- }
- } else {
- /* Any other amp move state means the move failed. */
- pi->amp_move_id = pi->amp_id;
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- l2cap_amp_move_revert(sk);
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
- l2cap_send_move_chan_cfm(conn, pi, pi->scid,
- L2CAP_MOVE_CHAN_UNCONFIRMED);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- }
- break;
- default:
- /* Failed (including collision case) */
- read_lock(&conn->chan_list.lock);
- sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
- read_unlock(&conn->chan_list.lock);
-
- if (!sk) {
- /* Could not locate channel, icid is best guess */
- l2cap_send_move_chan_cfm(conn, NULL, icid,
- L2CAP_MOVE_CHAN_UNCONFIRMED);
- break;
- }
-
- lock_sock(sk);
- pi = l2cap_pi(sk);
-
- l2cap_sock_clear_timer(sk);
-
- if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
- if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
- pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
- else {
- /* Cleanup - cancel move */
- pi->amp_move_id = pi->amp_id;
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- l2cap_amp_move_revert(sk);
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
- }
- }
-
- l2cap_send_move_chan_cfm(conn, pi, pi->scid,
- L2CAP_MOVE_CHAN_UNCONFIRMED);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- break;
- }
-
- if (sk)
- release_sock(sk);
+ if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
+ l2cap_move_continue(conn, icid, result);
+ else
+ l2cap_move_fail(conn, cmd->ident, icid, result);
return 0;
}
-static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
- struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
- struct sock *sk;
- struct l2cap_pinfo *pi;
+ struct l2cap_move_chan_cfm *cfm = data;
+ struct l2cap_chan *chan;
u16 icid, result;
+ if (cmd_len != sizeof(*cfm))
+ return -EPROTO;
+
icid = le16_to_cpu(cfm->icid);
result = le16_to_cpu(cfm->result);
- BT_DBG("icid %d, result %d", (int) icid, (int) result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
- read_lock(&conn->chan_list.lock);
- sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
- read_unlock(&conn->chan_list.lock);
-
- if (!sk) {
- BT_DBG("Bad channel (%d)", (int) icid);
- goto send_move_confirm_response;
+ chan = l2cap_get_chan_by_dcid(conn, icid);
+ if (!chan) {
+ /* Spec requires a response even if the icid was not found */
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+ return 0;
}
- lock_sock(sk);
- pi = l2cap_pi(sk);
-
- if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
- pi->amp_id = pi->amp_move_id;
- if (!pi->amp_id && pi->ampchan) {
- struct hci_chan *ampchan = pi->ampchan;
- struct hci_conn *ampcon = pi->ampcon;
- /* Have moved off of AMP, free the channel */
- pi->ampchan = NULL;
- pi->ampcon = NULL;
- if (hci_chan_put(ampchan))
- ampcon->l2cap_data = NULL;
- else
- l2cap_deaggregate(ampchan, pi);
- }
- l2cap_amp_move_success(sk);
+ if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
+ if (result == L2CAP_MC_CONFIRMED) {
+ chan->local_amp_id = chan->move_id;
+ if (!chan->local_amp_id)
+ __release_logical_link(chan);
} else {
- pi->amp_move_id = pi->amp_id;
- l2cap_amp_move_revert(sk);
+ chan->move_id = chan->local_amp_id;
}
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
- } else if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
- BT_DBG("Bad AMP_MOVE_STATE (%d)", pi->amp_move_state);
+
+ l2cap_move_done(chan);
}
-send_move_confirm_response:
l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
- if (sk)
- release_sock(sk);
+ l2cap_chan_unlock(chan);
return 0;
}
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
- struct l2cap_move_chan_cfm_rsp *rsp =
- (struct l2cap_move_chan_cfm_rsp *) data;
- struct sock *sk;
- struct l2cap_pinfo *pi;
-
+ struct l2cap_move_chan_cfm_rsp *rsp = data;
+ struct l2cap_chan *chan;
u16 icid;
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
icid = le16_to_cpu(rsp->icid);
- BT_DBG("icid %d", (int) icid);
+ BT_DBG("icid 0x%4.4x", icid);
- read_lock(&conn->chan_list.lock);
- sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
- read_unlock(&conn->chan_list.lock);
-
- if (!sk)
+ chan = l2cap_get_chan_by_scid(conn, icid);
+ if (!chan)
return 0;
- lock_sock(sk);
- pi = l2cap_pi(sk);
+ __clear_chan_timer(chan);
- l2cap_sock_clear_timer(sk);
+ if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
+ chan->local_amp_id = chan->move_id;
- if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- pi->amp_id = pi->amp_move_id;
+ if (!chan->local_amp_id && chan->hs_hchan)
+ __release_logical_link(chan);
- if (!pi->amp_id && pi->ampchan) {
- struct hci_chan *ampchan = pi->ampchan;
- struct hci_conn *ampcon = pi->ampcon;
- /* Have moved off of AMP, free the channel */
- pi->ampchan = NULL;
- pi->ampcon = NULL;
- if (hci_chan_put(ampchan))
- ampcon->l2cap_data = NULL;
- else
- l2cap_deaggregate(ampchan, pi);
- }
-
- l2cap_amp_move_success(sk);
-
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ l2cap_move_done(chan);
}
- release_sock(sk);
-
- return 0;
-}
-
-static void l2cap_amp_signal_worker(struct work_struct *work)
-{
- int err = 0;
- struct l2cap_amp_signal_work *ampwork =
- container_of(work, struct l2cap_amp_signal_work, work);
-
- switch (ampwork->cmd.code) {
- case L2CAP_MOVE_CHAN_REQ:
- err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
- ampwork->data);
- break;
-
- case L2CAP_MOVE_CHAN_RSP:
- err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
- ampwork->data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM:
- err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
- ampwork->data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM_RSP:
- err = l2cap_move_channel_confirm_rsp(ampwork->conn,
- &ampwork->cmd, ampwork->data);
- break;
-
- default:
- BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
- err = -EINVAL;
- break;
- }
-
- if (err) {
- struct l2cap_cmd_rej rej;
- BT_DBG("error %d", err);
-
- /* In this context, commands are only rejected with
- * "command not understood", code 0.
- */
- rej.reason = cpu_to_le16(0);
- l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
- L2CAP_COMMAND_REJ, sizeof(rej), &rej);
- }
-
- kfree_skb(ampwork->skb);
- kfree(ampwork);
-}
-
-void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
- struct sock *sk)
-{
- struct l2cap_pinfo *pi;
-
- BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
- (int) local_id, (int) remote_id, sk);
-
- lock_sock(sk);
-
- if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
- release_sock(sk);
- return;
- }
-
- pi = l2cap_pi(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- if (bt_sk(sk)->parent) {
- struct l2cap_conn_rsp rsp;
- char buf[128];
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-
- /* Incoming channel on AMP */
- if (result == L2CAP_CREATE_CHAN_SUCCESS) {
- /* Send successful response */
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- } else {
- /* Send negative response */
- rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- }
-
- l2cap_send_cmd(pi->conn, pi->ident,
- L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- if (result == L2CAP_CREATE_CHAN_SUCCESS) {
- sk->sk_state = BT_CONFIG;
- pi->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(pi->conn,
- l2cap_get_ident(pi->conn),
- L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
- }
- } else {
- /* Outgoing channel on AMP */
- if (result != L2CAP_CREATE_CHAN_SUCCESS) {
- /* Revert to BR/EDR connect */
- l2cap_send_conn_req(sk);
- } else {
- pi->amp_id = local_id;
- l2cap_send_create_chan_req(sk, remote_id);
- }
- }
- } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
- pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
- l2cap_amp_move_setup(sk);
- pi->amp_move_id = local_id;
- pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;
-
- l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
- pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
- struct hci_chan *chan;
- struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- pi->remote_fs = default_fs;
- pi->local_fs = default_fs;
- chan = l2cap_chan_admit(local_id, sk);
- if (chan) {
- if (chan->state == BT_CONNECTED) {
- /* Logical link is ready to go */
- pi->ampcon = chan->conn;
- pi->ampcon->l2cap_data = pi->conn;
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident, pi->dcid,
- L2CAP_MOVE_CHAN_SUCCESS);
-
- l2cap_create_cfm(chan, 0);
- } else {
- /* Wait for logical link to be ready */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
- }
- } else {
- /* Logical link not available */
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident, pi->dcid,
- L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
- }
- } else {
- BT_DBG("result %d, role %d, local_busy %d", result,
- (int) pi->amp_move_role,
- (int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));
-
- if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
- if (result == -EINVAL)
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident, pi->dcid,
- L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
- else
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident, pi->dcid,
- L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
- }
-
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
-
- if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
- l2cap_rmem_available(sk))
- l2cap_ertm_tx(sk, 0, 0,
- L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
-
- /* Restart data transmission */
- l2cap_ertm_send(sk);
- }
-
- release_sock(sk);
-}
-
-static void l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
-{
- struct l2cap_pinfo *pi;
- struct sock *sk;
- struct hci_chan *ampchan;
- struct hci_conn *ampcon;
-
- BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);
-
- sk = chan->l2cap_sk;
- chan->l2cap_sk = NULL;
-
- BT_DBG("sk %p", sk);
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
- release_sock(sk);
- return;
- }
-
- pi = l2cap_pi(sk);
-
- if ((!status) && (chan != NULL)) {
- pi->ampcon = chan->conn;
- pi->ampcon->l2cap_data = pi->conn;
-
- BT_DBG("amp_move_state %d", pi->amp_move_state);
-
- if (sk->sk_state != BT_CONNECTED) {
- struct l2cap_conf_rsp rsp;
-
- /* Must use spinlock to prevent concurrent
- * execution of l2cap_config_rsp()
- */
- bh_lock_sock(sk);
- l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(sk, &rsp,
- L2CAP_CONF_SUCCESS, 0), &rsp);
- pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- set_default_fcs(l2cap_pi(sk));
-
- sk->sk_state = BT_CONNECTED;
-
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
- l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
- l2cap_ertm_init(sk);
-
- l2cap_chan_ready(sk);
- }
- bh_unlock_sock(sk);
- } else if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
- /* Move confirm will be sent after a success
- * response is received
- */
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
- } else if (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
- else if (pi->amp_move_role ==
- L2CAP_AMP_MOVE_INITIATOR) {
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
- L2CAP_MOVE_CHAN_SUCCESS);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- } else if (pi->amp_move_role ==
- L2CAP_AMP_MOVE_RESPONDER) {
- pi->amp_move_state =
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident, pi->dcid,
- L2CAP_MOVE_CHAN_SUCCESS);
- }
- } else if ((pi->amp_move_state !=
- L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) &&
- (pi->amp_move_state !=
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) &&
- (pi->amp_move_state !=
- L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP)) {
- /* Move was not in expected state, free the channel */
- ampchan = pi->ampchan;
- ampcon = pi->ampcon;
- pi->ampchan = NULL;
- pi->ampcon = NULL;
- if (ampchan) {
- if (hci_chan_put(ampchan))
- ampcon->l2cap_data = NULL;
- else
- l2cap_deaggregate(ampchan, pi);
- }
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- }
- } else {
- /* Logical link setup failed. */
-
- if (sk->sk_state != BT_CONNECTED)
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
- l2cap_amp_move_revert(sk);
- l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- l2cap_send_move_chan_rsp(pi->conn,
- pi->amp_move_cmd_ident, pi->dcid,
- L2CAP_MOVE_CHAN_REFUSED_CONFIG);
- } else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
- if ((pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
- (pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
- /* Remote has only sent pending or
- * success responses, clean up
- */
- l2cap_amp_move_revert(sk);
- l2cap_pi(sk)->amp_move_role =
- L2CAP_AMP_MOVE_NONE;
- pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
- }
-
- /* Other amp move states imply that the move
- * has already aborted
- */
- l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
- L2CAP_MOVE_CHAN_UNCONFIRMED);
- l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
- }
- ampchan = pi->ampchan;
- ampcon = pi->ampcon;
- pi->ampchan = NULL;
- pi->ampcon = NULL;
- if (ampchan) {
- if (hci_chan_put(ampchan))
- ampcon->l2cap_data = NULL;
- else
- l2cap_deaggregate(ampchan, pi);
- }
- }
-
- release_sock(sk);
-}
-
-static void l2cap_logical_link_worker(struct work_struct *work)
-{
- struct l2cap_logical_link_work *log_link_work =
- container_of(work, struct l2cap_logical_link_work, work);
- struct sock *sk = log_link_work->chan->l2cap_sk;
-
- if (sk) {
- l2cap_logical_link_complete(log_link_work->chan,
- log_link_work->status);
- sock_put(sk);
- }
- hci_chan_put(log_link_work->chan);
- kfree(log_link_work);
-}
-
-static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
-{
- struct l2cap_logical_link_work *amp_work;
-
- if (!chan->l2cap_sk) {
- BT_ERR("Expected l2cap_sk to point to connecting socket");
- return -EFAULT;
- }
-
- amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
- if (!amp_work) {
- sock_put(chan->l2cap_sk);
- return -ENOMEM;
- }
-
- INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
- amp_work->chan = chan;
- amp_work->status = status;
-
- hci_chan_hold(chan);
-
- if (!queue_work(_l2cap_wq, &amp_work->work)) {
- kfree(amp_work);
- sock_put(chan->l2cap_sk);
- hci_chan_put(chan);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
-{
- struct l2cap_conn *conn = chan->conn->l2cap_data;
-
- BT_DBG("chan %p conn %p status %d", chan, conn, status);
-
- /* TODO: if failed status restore previous fs */
- return 0;
-}
-
-int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
-{
- struct l2cap_chan_list *l;
- struct l2cap_conn *conn = chan->conn->l2cap_data;
- struct sock *sk;
-
- BT_DBG("chan %p conn %p", chan, conn);
-
- if (!conn)
- return 0;
-
- l = &conn->chan_list;
-
- read_lock(&l->lock);
-
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
- /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
- if (l2cap_pi(sk)->ampchan == chan) {
- struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
- l2cap_pi(sk)->ampchan = NULL;
- l2cap_pi(sk)->ampcon = NULL;
- if (hci_chan_put(chan))
- ampcon->l2cap_data = NULL;
- else
- l2cap_deaggregate(chan, l2cap_pi(sk));
-
- l2cap_amp_move_init(sk);
- }
- bh_unlock_sock(sk);
- }
-
- read_unlock(&l->lock);
-
- return 0;
-
-
-}
-
-static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
- u8 *data, struct sk_buff *skb)
-{
- struct l2cap_amp_signal_work *amp_work;
-
- amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
- if (!amp_work)
- return -ENOMEM;
-
- INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
- amp_work->conn = conn;
- amp_work->cmd = *cmd;
- amp_work->data = data;
- amp_work->skb = skb_clone(skb, GFP_ATOMIC);
- if (!amp_work->skb) {
- kfree(amp_work);
- return -ENOMEM;
- }
-
- if (!queue_work(_l2cap_wq, &amp_work->work)) {
- kfree_skb(amp_work->skb);
- kfree(amp_work);
- return -ENOMEM;
- }
+ l2cap_chan_unlock(chan);
return 0;
}
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
- u16 to_multiplier)
+ u16 to_multiplier)
{
u16 max_latency;
@@ -5723,13 +5154,13 @@
}
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+ struct l2cap_cmd_hdr *cmd,
+ u8 *data)
{
struct hci_conn *hcon = conn->hcon;
struct l2cap_conn_param_update_req *req;
struct l2cap_conn_param_update_rsp rsp;
- struct sock *sk;
- u16 min, max, latency, timeout, cmd_len;
+ u16 min, max, latency, to_multiplier, cmd_len;
int err;
if (!(hcon->link_mode & HCI_LM_MASTER))
@@ -5739,53 +5170,50 @@
if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
return -EPROTO;
+ req = (struct l2cap_conn_param_update_req *) data;
+ min = __le16_to_cpu(req->min);
+ max = __le16_to_cpu(req->max);
+ latency = __le16_to_cpu(req->latency);
+ to_multiplier = __le16_to_cpu(req->to_multiplier);
+
+ BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+ min, max, latency, to_multiplier);
+
memset(&rsp, 0, sizeof(rsp));
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
- sk = l2cap_find_sock_by_fixed_cid_and_dir(4, conn->src, conn->dst, 0);
-
- if (sk && !bt_sk(sk)->le_params.prohibit_remote_chg) {
- req = (struct l2cap_conn_param_update_req *) data;
- min = __le16_to_cpu(req->min);
- max = __le16_to_cpu(req->max);
- latency = __le16_to_cpu(req->latency);
- timeout = __le16_to_cpu(req->to_multiplier);
-
- err = l2cap_check_conn_param(min, max, latency, timeout);
- if (!err) {
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
- hci_le_conn_update(hcon, min, max, latency, timeout);
- bt_sk(sk)->le_params.interval_min = min;
- bt_sk(sk)->le_params.interval_max = max;
- bt_sk(sk)->le_params.latency = latency;
- bt_sk(sk)->le_params.supervision_timeout = timeout;
- }
- }
+ err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+ if (err)
+ rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ else
+ rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
- sizeof(rsp), &rsp);
+ sizeof(rsp), &rsp);
+ if (!err)
+ hci_le_conn_update(hcon, min, max, latency, to_multiplier);
return 0;
}
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
- struct sk_buff *skb)
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
{
int err = 0;
switch (cmd->code) {
case L2CAP_COMMAND_REJ:
- l2cap_command_rej(conn, cmd, data);
+ l2cap_command_rej(conn, cmd, cmd_len, data);
break;
case L2CAP_CONN_REQ:
- err = l2cap_connect_req(conn, cmd, data);
+ err = l2cap_connect_req(conn, cmd, cmd_len, data);
break;
case L2CAP_CONN_RSP:
- err = l2cap_connect_rsp(conn, cmd, data);
+ case L2CAP_CREATE_CHAN_RSP:
+ err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_CONF_REQ:
@@ -5793,15 +5221,15 @@
break;
case L2CAP_CONF_RSP:
- err = l2cap_config_rsp(conn, cmd, data);
+ err = l2cap_config_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_DISCONN_REQ:
- err = l2cap_disconnect_req(conn, cmd, data);
+ err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
break;
case L2CAP_DISCONN_RSP:
- err = l2cap_disconnect_rsp(conn, cmd, data);
+ err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_ECHO_REQ:
@@ -5812,27 +5240,33 @@
break;
case L2CAP_INFO_REQ:
- err = l2cap_information_req(conn, cmd, data);
+ err = l2cap_information_req(conn, cmd, cmd_len, data);
break;
case L2CAP_INFO_RSP:
- err = l2cap_information_rsp(conn, cmd, data);
+ err = l2cap_information_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_CREATE_CHAN_REQ:
- err = l2cap_create_channel_req(conn, cmd, data);
- break;
-
- case L2CAP_CREATE_CHAN_RSP:
- err = l2cap_create_channel_rsp(conn, cmd, data);
+ err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
break;
case L2CAP_MOVE_CHAN_REQ:
- case L2CAP_MOVE_CHAN_RSP:
- case L2CAP_MOVE_CHAN_CFM:
- case L2CAP_MOVE_CHAN_CFM_RSP:
- err = l2cap_sig_amp(conn, cmd, data, skb);
+ err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+ err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+ err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ break;
+
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -5843,7 +5277,7 @@
}
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u8 *data)
+ struct l2cap_cmd_hdr *cmd, u8 *data)
{
switch (cmd->code) {
case L2CAP_COMMAND_REJ:
@@ -5862,7 +5296,7 @@
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
u8 *data = skb->data;
int len = skb->len;
@@ -5879,7 +5313,8 @@
cmd_len = le16_to_cpu(cmd.len);
- BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
+ BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
+ cmd.ident);
if (cmd_len > len || !cmd.ident) {
BT_DBG("corrupted command");
@@ -5889,17 +5324,17 @@
if (conn->hcon->type == LE_LINK)
err = l2cap_le_sig_cmd(conn, &cmd, data);
else
- err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
- data, skb);
+ err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
if (err) {
- struct l2cap_cmd_rej rej;
+ struct l2cap_cmd_rej_unk rej;
BT_ERR("Wrong link type (%d)", err);
/* FIXME: Map err to a valid reason */
- rej.reason = cpu_to_le16(0);
- l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
+ sizeof(rej), &rej);
}
data += cmd_len;
@@ -5909,183 +5344,67 @@
kfree_skb(skb);
}
-static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
+static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
u16 our_fcs, rcv_fcs;
int hdr_size;
- if (pi->extended_control)
- hdr_size = L2CAP_EXTENDED_HDR_SIZE;
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hdr_size = L2CAP_EXT_HDR_SIZE;
else
- hdr_size = L2CAP_ENHANCED_HDR_SIZE;
+ hdr_size = L2CAP_ENH_HDR_SIZE;
- if (pi->fcs == L2CAP_FCS_CRC16) {
+ if (chan->fcs == L2CAP_FCS_CRC16) {
skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
- if (our_fcs != rcv_fcs) {
- BT_DBG("Bad FCS");
+ if (our_fcs != rcv_fcs)
return -EBADMSG;
- }
}
return 0;
}
-static void l2cap_ertm_pass_to_tx(struct sock *sk,
- struct bt_l2cap_control *control)
+static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
- BT_DBG("sk %p, control %p", sk, control);
- l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
-}
+ struct l2cap_ctrl control;
-static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
- struct bt_l2cap_control *control)
-{
- BT_DBG("sk %p, control %p", sk, control);
- l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
-}
+ BT_DBG("chan %p", chan);
-static void l2cap_ertm_resend(struct sock *sk)
-{
- struct bt_l2cap_control control;
- struct l2cap_pinfo *pi;
- struct sk_buff *skb;
- struct sk_buff *tx_skb;
- u16 seq;
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.final = 1;
+ control.reqseq = chan->buffer_seq;
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
- BT_DBG("sk %p", sk);
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ control.super = L2CAP_SUPER_RNR;
+ l2cap_send_sframe(chan, &control);
+ }
- pi = l2cap_pi(sk);
+ if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
+ chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
- if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
- return;
+ /* Send pending iframes */
+ l2cap_ertm_send(chan);
- if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
- pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
- return;
-
- while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
- seq = l2cap_seq_list_pop(&pi->retrans_list);
-
- skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
- if (!skb) {
- BT_DBG("Error: Can't retransmit seq %d, frame missing",
- (int) seq);
- continue;
- }
-
- bt_cb(skb)->retries += 1;
- control = bt_cb(skb)->control;
-
- if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
- BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- l2cap_seq_list_clear(&pi->retrans_list);
- break;
- }
-
- control.reqseq = pi->buffer_seq;
- if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
- control.final = 1;
- pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
- } else {
- control.final = 0;
- }
-
- if (skb_cloned(skb)) {
- /* Cloned sk_buffs are read-only, so we need a
- * writeable copy
- */
- tx_skb = skb_copy(skb, GFP_ATOMIC);
- } else {
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- }
-
- if (!tx_skb) {
- l2cap_seq_list_clear(&pi->retrans_list);
- break;
- }
-
- /* Update skb contents */
- if (pi->extended_control) {
- put_unaligned_le32(__pack_extended_control(&control),
- tx_skb->data + L2CAP_HDR_SIZE);
- } else {
- put_unaligned_le16(__pack_enhanced_control(&control),
- tx_skb->data + L2CAP_HDR_SIZE);
- }
-
- if (pi->fcs == L2CAP_FCS_CRC16)
- apply_fcs(tx_skb);
-
- sock_hold(sk);
- tx_skb->sk = sk;
- tx_skb->destructor = l2cap_skb_destructor;
- atomic_inc(&pi->ertm_queued);
-
- l2cap_do_send(sk, tx_skb);
-
- BT_DBG("Resent txseq %d", (int)control.txseq);
-
- pi->last_acked_seq = pi->buffer_seq;
+ if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+ test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
+ /* F-bit wasn't sent in an s-frame or i-frame yet, so
+ * send it now.
+ */
+ control.super = L2CAP_SUPER_RR;
+ l2cap_send_sframe(chan, &control);
}
}
-static inline void l2cap_ertm_retransmit(struct sock *sk,
- struct bt_l2cap_control *control)
-{
- BT_DBG("sk %p, control %p", sk, control);
-
- l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
- l2cap_ertm_resend(sk);
-}
-
-static void l2cap_ertm_retransmit_all(struct sock *sk,
- struct bt_l2cap_control *control)
-{
- struct l2cap_pinfo *pi;
- struct sk_buff *skb;
-
- BT_DBG("sk %p, control %p", sk, control);
-
- pi = l2cap_pi(sk);
-
- if (control->poll)
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-
- l2cap_seq_list_clear(&pi->retrans_list);
-
- if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
- return;
-
- if (pi->unacked_frames) {
- skb_queue_walk(TX_QUEUE(sk), skb) {
- if ((bt_cb(skb)->control.txseq == control->reqseq) ||
- skb == sk->sk_send_head)
- break;
- }
-
- skb_queue_walk_from(TX_QUEUE(sk), skb) {
- if (skb == sk->sk_send_head)
- break;
-
- l2cap_seq_list_append(&pi->retrans_list,
- bt_cb(skb)->control.txseq);
- }
-
- l2cap_ertm_resend(sk);
- }
-}
-
-static inline void append_skb_frag(struct sk_buff *skb,
- struct sk_buff *new_frag, struct sk_buff **last_frag)
+static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
+ struct sk_buff **last_frag)
{
/* skb->len reflects data in skb as well as all fragments
- skb->data_len reflects only data in fragments
+ * skb->data_len reflects only data in fragments
*/
- BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);
-
if (!skb_has_frag_list(skb))
skb_shinfo(skb)->frag_list = new_frag;
@@ -6099,335 +5418,290 @@
skb->truesize += new_frag->truesize;
}
-static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
- struct bt_l2cap_control *control, struct sk_buff *skb)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
+ struct l2cap_ctrl *control)
{
- struct l2cap_pinfo *pi;
int err = -EINVAL;
- BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
- skb, skb->len, skb->truesize);
-
- if (!control)
- return err;
-
- pi = l2cap_pi(sk);
-
- BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
- control->frame_type, control->sar, control->txseq,
- control->reqseq, control->final);
-
switch (control->sar) {
case L2CAP_SAR_UNSEGMENTED:
- if (pi->sdu) {
- BT_DBG("Unexpected unsegmented PDU during reassembly");
- kfree_skb(pi->sdu);
- pi->sdu = NULL;
- pi->sdu_last_frag = NULL;
- pi->sdu_len = 0;
- }
+ if (chan->sdu)
+ break;
- BT_DBG("Unsegmented");
- err = sock_queue_rcv_skb(sk, skb);
+ err = chan->ops->recv(chan, skb);
break;
case L2CAP_SAR_START:
- if (pi->sdu) {
- BT_DBG("Unexpected start PDU during reassembly");
- kfree_skb(pi->sdu);
- }
+ if (chan->sdu)
+ break;
- pi->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ chan->sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
- if (pi->sdu_len > pi->imtu) {
+ if (chan->sdu_len > chan->imtu) {
err = -EMSGSIZE;
break;
}
- if (skb->len >= pi->sdu_len)
+ if (skb->len >= chan->sdu_len)
break;
- pi->sdu = skb;
- pi->sdu_last_frag = skb;
-
- BT_DBG("Start");
+ chan->sdu = skb;
+ chan->sdu_last_frag = skb;
skb = NULL;
err = 0;
break;
case L2CAP_SAR_CONTINUE:
- if (!pi->sdu)
+ if (!chan->sdu)
break;
- append_skb_frag(pi->sdu, skb,
- &pi->sdu_last_frag);
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
skb = NULL;
- if (pi->sdu->len >= pi->sdu_len)
+ if (chan->sdu->len >= chan->sdu_len)
break;
- BT_DBG("Continue, reassembled %d", pi->sdu->len);
-
err = 0;
break;
case L2CAP_SAR_END:
- if (!pi->sdu)
+ if (!chan->sdu)
break;
- append_skb_frag(pi->sdu, skb,
- &pi->sdu_last_frag);
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
skb = NULL;
- if (pi->sdu->len != pi->sdu_len)
+ if (chan->sdu->len != chan->sdu_len)
break;
- BT_DBG("End, reassembled %d", pi->sdu->len);
- /* If the sender used tiny PDUs, the rcv queuing could fail.
- * Applications that have issues here should use a larger
- * sk_rcvbuf.
- */
- err = sock_queue_rcv_skb(sk, pi->sdu);
+ err = chan->ops->recv(chan, chan->sdu);
if (!err) {
/* Reassembly complete */
- pi->sdu = NULL;
- pi->sdu_last_frag = NULL;
- pi->sdu_len = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
}
break;
-
- default:
- BT_DBG("Bad SAR value");
- break;
}
if (err) {
- BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
- err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
- if (pi->sdu) {
- kfree_skb(pi->sdu);
- pi->sdu = NULL;
- }
- pi->sdu_last_frag = NULL;
- pi->sdu_len = 0;
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
}
- /* Update local busy state */
- if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
- l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);
-
return err;
}
-static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
+static int l2cap_resegment(struct l2cap_chan *chan)
+{
+ /* Placeholder */
+ return 0;
+}
+
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
+{
+ u8 event;
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
+
+ event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
+ l2cap_tx(chan, NULL, NULL, event);
+}
+
+static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
int err = 0;
- /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
+ /* Pass sequential frames to l2cap_reassemble_sdu()
* until a gap is encountered.
*/
- struct l2cap_pinfo *pi;
+ BT_DBG("chan %p", chan);
- BT_DBG("sk %p", sk);
- pi = l2cap_pi(sk);
-
- while (l2cap_rmem_available(sk)) {
+ while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
struct sk_buff *skb;
BT_DBG("Searching for skb with txseq %d (queue len %d)",
- (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
+ chan->buffer_seq, skb_queue_len(&chan->srej_q));
- skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
+ skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
if (!skb)
break;
- skb_unlink(skb, SREJ_QUEUE(sk));
- pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
- err = l2cap_ertm_rx_expected_iframe(sk,
- &bt_cb(skb)->control, skb);
+ skb_unlink(skb, &chan->srej_q);
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
if (err)
break;
}
- if (skb_queue_empty(SREJ_QUEUE(sk))) {
- pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
- l2cap_ertm_send_ack(sk);
+ if (skb_queue_empty(&chan->srej_q)) {
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ l2cap_send_ack(chan);
}
return err;
}
-static void l2cap_ertm_handle_srej(struct sock *sk,
- struct bt_l2cap_control *control)
+static void l2cap_handle_srej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- struct l2cap_pinfo *pi;
struct sk_buff *skb;
- BT_DBG("sk %p, control %p", sk, control);
+ BT_DBG("chan %p, control %p", chan, control);
- pi = l2cap_pi(sk);
-
- if (control->reqseq == pi->next_tx_seq) {
- BT_DBG("Invalid reqseq %d, disconnecting",
- (int) control->reqseq);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan, ECONNRESET);
return;
}
- skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
if (skb == NULL) {
BT_DBG("Seq %d not available for retransmission",
- (int) control->reqseq);
+ control->reqseq);
return;
}
- if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
- BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan, ECONNRESET);
return;
}
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
if (control->poll) {
- l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_pass_to_tx(chan, control);
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
- l2cap_ertm_retransmit(sk, control);
- l2cap_ertm_send(sk);
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_retransmit(chan, control);
+ l2cap_ertm_send(chan);
- if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- pi->srej_save_reqseq = control->reqseq;
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
}
} else {
- l2cap_ertm_pass_to_tx_fbit(sk, control);
+ l2cap_pass_to_tx_fbit(chan, control);
if (control->final) {
- if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
- (pi->srej_save_reqseq == control->reqseq)) {
- pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
- } else {
- l2cap_ertm_retransmit(sk, control);
- }
+ if (chan->srej_save_reqseq != control->reqseq ||
+ !test_and_clear_bit(CONN_SREJ_ACT,
+ &chan->conn_state))
+ l2cap_retransmit(chan, control);
} else {
- l2cap_ertm_retransmit(sk, control);
- if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- pi->srej_save_reqseq = control->reqseq;
+ l2cap_retransmit(chan, control);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
}
}
}
}
-static void l2cap_ertm_handle_rej(struct sock *sk,
- struct bt_l2cap_control *control)
+static void l2cap_handle_rej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- struct l2cap_pinfo *pi;
struct sk_buff *skb;
- BT_DBG("sk %p, control %p", sk, control);
+ BT_DBG("chan %p, control %p", chan, control);
- pi = l2cap_pi(sk);
-
- if (control->reqseq == pi->next_tx_seq) {
- BT_DBG("Invalid reqseq %d, disconnecting",
- (int) control->reqseq);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan, ECONNRESET);
return;
}
- skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
- BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ if (chan->max_tx && skb &&
+ bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan, ECONNRESET);
return;
}
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_pass_to_tx(chan, control);
if (control->final) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else
- l2cap_ertm_retransmit_all(sk, control);
+ if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
+ l2cap_retransmit_all(chan, control);
} else {
- l2cap_ertm_retransmit_all(sk, control);
- l2cap_ertm_send(sk);
- if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
- pi->conn_state |= L2CAP_CONN_REJ_ACT;
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
+ set_bit(CONN_REJ_ACT, &chan->conn_state);
}
}
-static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
+static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
- struct l2cap_pinfo *pi;
+ BT_DBG("chan %p, txseq %d", chan, txseq);
- BT_DBG("sk %p, txseq %d", sk, (int)txseq);
- pi = l2cap_pi(sk);
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
+ chan->expected_tx_seq);
- BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
- (int)pi->expected_tx_seq);
-
- if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
- if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
/* See notes below regarding "double poll" and
* invalid packets.
*/
- if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
- BT_DBG("Invalid/Ignore - txseq outside "
- "tx window after SREJ sent");
- return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - after SREJ");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
} else {
- BT_DBG("Invalid - bad txseq within tx "
- "window after SREJ sent");
- return L2CAP_ERTM_TXSEQ_INVALID;
+ BT_DBG("Invalid - in window after SREJ sent");
+ return L2CAP_TXSEQ_INVALID;
}
}
- if (pi->srej_list.head == txseq) {
+ if (chan->srej_list.head == txseq) {
BT_DBG("Expected SREJ");
- return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
+ return L2CAP_TXSEQ_EXPECTED_SREJ;
}
- if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
+ if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
BT_DBG("Duplicate SREJ - txseq already stored");
- return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
+ return L2CAP_TXSEQ_DUPLICATE_SREJ;
}
- if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
- BT_DBG("Unexpected SREJ - txseq not requested "
- "with SREJ");
- return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
+ if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - not requested");
+ return L2CAP_TXSEQ_UNEXPECTED_SREJ;
}
}
- if (pi->expected_tx_seq == txseq) {
- if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ if (chan->expected_tx_seq == txseq) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
BT_DBG("Invalid - txseq outside tx window");
- return L2CAP_ERTM_TXSEQ_INVALID;
+ return L2CAP_TXSEQ_INVALID;
} else {
BT_DBG("Expected");
- return L2CAP_ERTM_TXSEQ_EXPECTED;
+ return L2CAP_TXSEQ_EXPECTED;
}
}
- if (__delta_seq(txseq, pi->last_acked_seq, pi) <
- __delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) <
+ __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
BT_DBG("Duplicate - expected_tx_seq later than txseq");
- return L2CAP_ERTM_TXSEQ_DUPLICATE;
+ return L2CAP_TXSEQ_DUPLICATE;
}
- if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
/* A source of invalid packets is a "double poll" condition,
* where delays cause us to send multiple poll packets. If
* the remote stack receives and processes both polls,
@@ -6445,74 +5719,73 @@
* causes a disconnect.
*/
- if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
BT_DBG("Invalid/Ignore - txseq outside tx window");
- return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
+ return L2CAP_TXSEQ_INVALID_IGNORE;
} else {
BT_DBG("Invalid - txseq outside tx window");
- return L2CAP_ERTM_TXSEQ_INVALID;
+ return L2CAP_TXSEQ_INVALID;
}
} else {
BT_DBG("Unexpected - txseq indicates missing frames");
- return L2CAP_ERTM_TXSEQ_UNEXPECTED;
+ return L2CAP_TXSEQ_UNEXPECTED;
}
}
-static int l2cap_ertm_rx_state_recv(struct sock *sk,
- struct bt_l2cap_control *control,
- struct sk_buff *skb, u8 event)
+static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
{
- struct l2cap_pinfo *pi;
int err = 0;
bool skb_in_use = 0;
- BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
- (int)event);
- pi = l2cap_pi(sk);
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
switch (event) {
- case L2CAP_ERTM_EVENT_RECV_IFRAME:
- switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
- case L2CAP_ERTM_TXSEQ_EXPECTED:
- l2cap_ertm_pass_to_tx(sk, control);
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, control->txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ l2cap_pass_to_tx(chan, control);
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
BT_DBG("Busy, discarding expected seq %d",
- control->txseq);
+ control->txseq);
break;
}
- pi->expected_tx_seq = __next_seq(control->txseq, pi);
- pi->buffer_seq = pi->expected_tx_seq;
+ chan->expected_tx_seq = __next_seq(chan,
+ control->txseq);
+
+ chan->buffer_seq = chan->expected_tx_seq;
skb_in_use = 1;
- err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ err = l2cap_reassemble_sdu(chan, skb, control);
if (err)
break;
if (control->final) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
control->final = 0;
- l2cap_ertm_retransmit_all(sk, control);
- l2cap_ertm_send(sk);
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
}
}
- if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
- l2cap_ertm_send_ack(sk);
+ if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ l2cap_send_ack(chan);
break;
- case L2CAP_ERTM_TXSEQ_UNEXPECTED:
- l2cap_ertm_pass_to_tx(sk, control);
+ case L2CAP_TXSEQ_UNEXPECTED:
+ l2cap_pass_to_tx(chan, control);
/* Can't issue SREJ frames in the local busy state.
* Drop this frame, it will be seen as missing
* when local busy is exited.
*/
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
BT_DBG("Busy, discarding unexpected seq %d",
- control->txseq);
+ control->txseq);
break;
}
@@ -6520,69 +5793,66 @@
* must be sent for each missing frame. The
* current frame is stored for later use.
*/
- skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_queue_tail(&chan->srej_q, skb);
skb_in_use = 1;
BT_DBG("Queued %p (queue len %d)", skb,
- skb_queue_len(SREJ_QUEUE(sk)));
+ skb_queue_len(&chan->srej_q));
- pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
- l2cap_seq_list_clear(&pi->srej_list);
- l2cap_ertm_send_srej(sk, control->txseq);
+ clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
+ l2cap_send_srej(chan, control->txseq);
- pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
+ chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
break;
- case L2CAP_ERTM_TXSEQ_DUPLICATE:
- l2cap_ertm_pass_to_tx(sk, control);
+ case L2CAP_TXSEQ_DUPLICATE:
+ l2cap_pass_to_tx(chan, control);
break;
- case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
+ case L2CAP_TXSEQ_INVALID_IGNORE:
break;
- case L2CAP_ERTM_TXSEQ_INVALID:
+ case L2CAP_TXSEQ_INVALID:
default:
- l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
- ECONNRESET);
+ l2cap_send_disconn_req(chan, ECONNRESET);
break;
}
break;
- case L2CAP_ERTM_EVENT_RECV_RR:
- l2cap_ertm_pass_to_tx(sk, control);
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
if (control->final) {
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
- pi->amp_move_state ==
- L2CAP_AMP_STATE_WAIT_PREPARE) {
+ if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
+ !__chan_is_moving(chan)) {
control->final = 0;
- l2cap_ertm_retransmit_all(sk, control);
+ l2cap_retransmit_all(chan, control);
}
- l2cap_ertm_send(sk);
+ l2cap_ertm_send(chan);
} else if (control->poll) {
- l2cap_ertm_send_i_or_rr_or_rnr(sk);
+ l2cap_send_i_or_rr_or_rnr(chan);
} else {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- pi->unacked_frames)
- l2cap_ertm_start_retrans_timer(pi);
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- l2cap_ertm_send(sk);
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
+ __set_retrans_timer(chan);
+
+ l2cap_ertm_send(chan);
}
break;
- case L2CAP_ERTM_EVENT_RECV_RNR:
- pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
- l2cap_ertm_pass_to_tx(sk, control);
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
if (control && control->poll) {
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
- l2cap_ertm_send_rr_or_rnr(sk, 0);
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_rr_or_rnr(chan, 0);
}
- l2cap_ertm_stop_retrans_timer(pi);
- l2cap_seq_list_clear(&pi->retrans_list);
+ __clear_retrans_timer(chan);
+ l2cap_seq_list_clear(&chan->retrans_list);
break;
- case L2CAP_ERTM_EVENT_RECV_REJ:
- l2cap_ertm_handle_rej(sk, control);
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
break;
- case L2CAP_ERTM_EVENT_RECV_SREJ:
- l2cap_ertm_handle_srej(sk, control);
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
break;
default:
break;
@@ -6596,141 +5866,138 @@
return err;
}
-static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
- struct bt_l2cap_control *control,
- struct sk_buff *skb, u8 event)
+static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
{
- struct l2cap_pinfo *pi;
int err = 0;
u16 txseq = control->txseq;
bool skb_in_use = 0;
- BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
- (int)event);
- pi = l2cap_pi(sk);
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
switch (event) {
- case L2CAP_ERTM_EVENT_RECV_IFRAME:
- switch (l2cap_ertm_classify_txseq(sk, txseq)) {
- case L2CAP_ERTM_TXSEQ_EXPECTED:
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
/* Keep frame for reassembly later */
- l2cap_ertm_pass_to_tx(sk, control);
- skb_queue_tail(SREJ_QUEUE(sk), skb);
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
skb_in_use = 1;
BT_DBG("Queued %p (queue len %d)", skb,
- skb_queue_len(SREJ_QUEUE(sk)));
+ skb_queue_len(&chan->srej_q));
- pi->expected_tx_seq = __next_seq(txseq, pi);
+ chan->expected_tx_seq = __next_seq(chan, txseq);
break;
- case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
- l2cap_seq_list_pop(&pi->srej_list);
+ case L2CAP_TXSEQ_EXPECTED_SREJ:
+ l2cap_seq_list_pop(&chan->srej_list);
- l2cap_ertm_pass_to_tx(sk, control);
- skb_queue_tail(SREJ_QUEUE(sk), skb);
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
skb_in_use = 1;
BT_DBG("Queued %p (queue len %d)", skb,
- skb_queue_len(SREJ_QUEUE(sk)));
+ skb_queue_len(&chan->srej_q));
- err = l2cap_ertm_rx_queued_iframes(sk);
+ err = l2cap_rx_queued_iframes(chan);
if (err)
break;
break;
- case L2CAP_ERTM_TXSEQ_UNEXPECTED:
+ case L2CAP_TXSEQ_UNEXPECTED:
/* Got a frame that can't be reassembled yet.
* Save it for later, and send SREJs to cover
* the missing frames.
*/
- skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_queue_tail(&chan->srej_q, skb);
skb_in_use = 1;
BT_DBG("Queued %p (queue len %d)", skb,
- skb_queue_len(SREJ_QUEUE(sk)));
+ skb_queue_len(&chan->srej_q));
- l2cap_ertm_pass_to_tx(sk, control);
- l2cap_ertm_send_srej(sk, control->txseq);
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej(chan, control->txseq);
break;
- case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
+ case L2CAP_TXSEQ_UNEXPECTED_SREJ:
/* This frame was requested with an SREJ, but
* some expected retransmitted frames are
* missing. Request retransmission of missing
* SREJ'd frames.
*/
- skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_queue_tail(&chan->srej_q, skb);
skb_in_use = 1;
BT_DBG("Queued %p (queue len %d)", skb,
- skb_queue_len(SREJ_QUEUE(sk)));
+ skb_queue_len(&chan->srej_q));
- l2cap_ertm_pass_to_tx(sk, control);
- l2cap_ertm_send_srej_list(sk, control->txseq);
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej_list(chan, control->txseq);
break;
- case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
+ case L2CAP_TXSEQ_DUPLICATE_SREJ:
/* We've already queued this frame. Drop this copy. */
- l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_pass_to_tx(chan, control);
break;
- case L2CAP_ERTM_TXSEQ_DUPLICATE:
+ case L2CAP_TXSEQ_DUPLICATE:
/* Expecting a later sequence number, so this frame
* was already received. Ignore it completely.
*/
break;
- case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
+ case L2CAP_TXSEQ_INVALID_IGNORE:
break;
- case L2CAP_ERTM_TXSEQ_INVALID:
+ case L2CAP_TXSEQ_INVALID:
default:
- l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
- ECONNRESET);
+ l2cap_send_disconn_req(chan, ECONNRESET);
break;
}
break;
- case L2CAP_ERTM_EVENT_RECV_RR:
- l2cap_ertm_pass_to_tx(sk, control);
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
if (control->final) {
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
control->final = 0;
- l2cap_ertm_retransmit_all(sk, control);
+ l2cap_retransmit_all(chan, control);
}
- l2cap_ertm_send(sk);
+ l2cap_ertm_send(chan);
} else if (control->poll) {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- pi->unacked_frames) {
- l2cap_ertm_start_retrans_timer(pi);
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames) {
+ __set_retrans_timer(chan);
}
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
- l2cap_ertm_send_srej_tail(sk);
+
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_srej_tail(chan);
} else {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- pi->unacked_frames) {
- l2cap_ertm_start_retrans_timer(pi);
- }
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- l2cap_ertm_send_ack(sk);
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
+ __set_retrans_timer(chan);
+
+ l2cap_send_ack(chan);
}
break;
- case L2CAP_ERTM_EVENT_RECV_RNR:
- pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
- l2cap_ertm_pass_to_tx(sk, control);
- if (control->poll)
- l2cap_ertm_send_srej_tail(sk);
- else {
- struct bt_l2cap_control rr_control;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control->poll) {
+ l2cap_send_srej_tail(chan);
+ } else {
+ struct l2cap_ctrl rr_control;
memset(&rr_control, 0, sizeof(rr_control));
- rr_control.frame_type = 's';
- rr_control.super = L2CAP_SFRAME_RR;
- rr_control.reqseq = pi->buffer_seq;
- l2cap_ertm_send_sframe(sk, &rr_control);
+ rr_control.sframe = 1;
+ rr_control.super = L2CAP_SUPER_RR;
+ rr_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &rr_control);
}
break;
- case L2CAP_ERTM_EVENT_RECV_REJ:
- l2cap_ertm_handle_rej(sk, control);
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
break;
- case L2CAP_ERTM_EVENT_RECV_SREJ:
- l2cap_ertm_handle_srej(sk, control);
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
break;
}
@@ -6742,340 +6009,127 @@
return err;
}
-static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
- struct bt_l2cap_control *control,
- struct sk_buff *skb, u8 event)
+static int l2cap_finish_move(struct l2cap_chan *chan)
{
- struct l2cap_pinfo *pi;
- int err = 0;
- bool skb_in_use = 0;
+ BT_DBG("chan %p", chan);
- BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
- (int)event);
- pi = l2cap_pi(sk);
+ chan->rx_state = L2CAP_RX_STATE_RECV;
- /* Only handle expected frames, to avoid state changes. */
-
- switch (event) {
- case L2CAP_ERTM_EVENT_RECV_IFRAME:
- if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
- L2CAP_ERTM_TXSEQ_EXPECTED) {
- l2cap_ertm_pass_to_tx(sk, control);
-
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- BT_DBG("Busy, discarding expected seq %d",
- control->txseq);
- break;
- }
-
- pi->expected_tx_seq = __next_seq(control->txseq, pi);
- pi->buffer_seq = pi->expected_tx_seq;
- skb_in_use = 1;
-
- err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
- if (err)
- break;
-
- if (control->final) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else
- control->final = 0;
- }
- }
- break;
- case L2CAP_ERTM_EVENT_RECV_RR:
- case L2CAP_ERTM_EVENT_RECV_RNR:
- case L2CAP_ERTM_EVENT_RECV_REJ:
- l2cap_ertm_process_reqseq(sk, control->reqseq);
- break;
- case L2CAP_ERTM_EVENT_RECV_SREJ:
- /* Ignore */
- break;
- default:
- break;
- }
-
- if (skb && !skb_in_use) {
- BT_DBG("Freeing %p", skb);
- kfree_skb(skb);
- }
-
- return err;
-}
-
-static int l2cap_answer_move_poll(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
- struct bt_l2cap_control control;
- int err = 0;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);
-
- if (!skb_queue_empty(TX_QUEUE(sk)))
- sk->sk_send_head = skb_peek(TX_QUEUE(sk));
+ if (chan->hs_hcon)
+ chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
else
- sk->sk_send_head = NULL;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+
+ return l2cap_resegment(chan);
+}
+
+static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
+
+ if (!control->poll)
+ return -EPROTO;
+
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ if (!skb_queue_empty(&chan->tx_q))
+ chan->tx_send_head = skb_peek(&chan->tx_q);
+ else
+ chan->tx_send_head = NULL;
/* Rewind next_tx_seq to the point expected
* by the receiver.
*/
- pi->next_tx_seq = pi->amp_move_reqseq;
- pi->unacked_frames = 0;
+ chan->next_tx_seq = control->reqseq;
+ chan->unacked_frames = 0;
- err = l2cap_finish_amp_move(sk);
-
+ err = l2cap_finish_move(chan);
if (err)
return err;
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
- l2cap_ertm_send_i_or_rr_or_rnr(sk);
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_i_or_rr_or_rnr(chan);
- memset(&control, 0, sizeof(control));
- control.reqseq = pi->amp_move_reqseq;
+ if (event == L2CAP_EV_RECV_IFRAME)
+ return -EPROTO;
- if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
- err = -EPROTO;
+ return l2cap_rx_state_recv(chan, control, NULL, event);
+}
+
+static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err;
+
+ if (!control->final)
+ return -EPROTO;
+
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ if (!skb_queue_empty(&chan->tx_q))
+ chan->tx_send_head = skb_peek(&chan->tx_q);
else
- err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
- pi->amp_move_event);
+ chan->tx_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ chan->next_tx_seq = control->reqseq;
+ chan->unacked_frames = 0;
+
+ if (chan->hs_hcon)
+ chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+ else
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+
+ err = l2cap_resegment(chan);
+
+ if (!err)
+ err = l2cap_rx_state_recv(chan, control, skb, event);
return err;
}
-static void l2cap_amp_move_setup(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
- struct sk_buff *skb;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- l2cap_ertm_stop_ack_timer(pi);
- l2cap_ertm_stop_retrans_timer(pi);
- l2cap_ertm_stop_monitor_timer(pi);
-
- pi->retry_count = 0;
- skb_queue_walk(TX_QUEUE(sk), skb) {
- if (bt_cb(skb)->retries)
- bt_cb(skb)->retries = 1;
- else
- break;
- }
-
- pi->expected_tx_seq = pi->buffer_seq;
-
- pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
- l2cap_seq_list_clear(&pi->retrans_list);
- l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
- skb_queue_purge(SREJ_QUEUE(sk));
-
- pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
- pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
-
- BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
- pi->rx_state);
-
- pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
-}
-
-static void l2cap_amp_move_revert(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
- l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
- pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
- } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
- pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
-}
-
-static int l2cap_amp_move_reconf(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
- u8 buf[64];
- int err = 0;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
- l2cap_build_amp_reconf_req(sk, buf), buf);
- return err;
-}
-
-static void l2cap_amp_move_success(struct sock *sk)
-{
- struct l2cap_pinfo *pi;
-
- BT_DBG("sk %p", sk);
-
- pi = l2cap_pi(sk);
-
- if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
- int err = 0;
- /* Send reconfigure request */
- if (pi->mode == L2CAP_MODE_ERTM) {
- pi->reconf_state = L2CAP_RECONF_INT;
- if (enable_reconfig)
- err = l2cap_amp_move_reconf(sk);
-
- if (err || !enable_reconfig) {
- pi->reconf_state = L2CAP_RECONF_NONE;
- l2cap_ertm_tx(sk, NULL, NULL,
- L2CAP_ERTM_EVENT_EXPLICIT_POLL);
- pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
- }
- } else
- pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
- } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
- if (pi->mode == L2CAP_MODE_ERTM)
- pi->rx_state =
- L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
- else
- pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
- }
-}
-
-static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
+static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
/* Make sure reqseq is for a packet that has been sent but not acked */
- u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
- return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
+ u16 unacked;
+
+ unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
+ return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
-static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
- struct sk_buff *skb)
+static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
{
- struct l2cap_pinfo *pi;
int err = 0;
- BT_DBG("sk %p, control %p, skb %p, state %d",
- sk, control, skb, l2cap_pi(sk)->rx_state);
+ BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
+ control, skb, event, chan->rx_state);
- pi = l2cap_pi(sk);
-
- if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
- L2CAP_ERTM_TXSEQ_EXPECTED) {
- l2cap_ertm_pass_to_tx(sk, control);
-
- BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
- __next_seq(pi->buffer_seq, pi));
-
- pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
-
- l2cap_ertm_rx_expected_iframe(sk, control, skb);
- } else {
- if (pi->sdu) {
- kfree_skb(pi->sdu);
- pi->sdu = NULL;
- }
- pi->sdu_last_frag = NULL;
- pi->sdu_len = 0;
-
- if (skb) {
- BT_DBG("Freeing %p", skb);
- kfree_skb(skb);
- }
- }
-
- pi->last_acked_seq = control->txseq;
- pi->expected_tx_seq = __next_seq(control->txseq, pi);
-
- return err;
-}
-
-static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
- struct sk_buff *skb, u8 event)
-{
- struct l2cap_pinfo *pi;
- int err = 0;
-
- BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
- sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);
-
- pi = l2cap_pi(sk);
-
- if (__valid_reqseq(pi, control->reqseq)) {
- switch (pi->rx_state) {
- case L2CAP_ERTM_RX_STATE_RECV:
- err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
+ if (__valid_reqseq(chan, control->reqseq)) {
+ switch (chan->rx_state) {
+ case L2CAP_RX_STATE_RECV:
+ err = l2cap_rx_state_recv(chan, control, skb, event);
break;
- case L2CAP_ERTM_RX_STATE_SREJ_SENT:
- err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
- event);
+ case L2CAP_RX_STATE_SREJ_SENT:
+ err = l2cap_rx_state_srej_sent(chan, control, skb,
+ event);
break;
- case L2CAP_ERTM_RX_STATE_AMP_MOVE:
- err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
- event);
+ case L2CAP_RX_STATE_WAIT_P:
+ err = l2cap_rx_state_wait_p(chan, control, skb, event);
break;
- case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
- if (control->final) {
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
-
- pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
- l2cap_ertm_process_reqseq(sk, control->reqseq);
-
- if (!skb_queue_empty(TX_QUEUE(sk)))
- sk->sk_send_head =
- skb_peek(TX_QUEUE(sk));
- else
- sk->sk_send_head = NULL;
-
- /* Rewind next_tx_seq to the point expected
- * by the receiver.
- */
- pi->next_tx_seq = control->reqseq;
- pi->unacked_frames = 0;
-
- if (pi->ampcon)
- pi->conn->mtu =
- pi->ampcon->hdev->acl_mtu;
- else
- pi->conn->mtu =
- pi->conn->hcon->hdev->acl_mtu;
-
- err = l2cap_setup_resegment(sk);
-
- if (err)
- break;
-
- err = l2cap_ertm_rx_state_recv(sk, control, skb,
- event);
- }
- break;
- case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
- if (control->poll) {
- pi->amp_move_reqseq = control->reqseq;
- pi->amp_move_event = event;
- err = l2cap_answer_move_poll(sk);
- }
- break;
- case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
- if (control->poll) {
- pi->amp_move_reqseq = control->reqseq;
- pi->amp_move_event = event;
-
- BT_DBG("amp_move_role 0x%2.2x, "
- "reconf_state 0x%2.2x",
- pi->amp_move_role, pi->reconf_state);
-
- if (pi->reconf_state == L2CAP_RECONF_ACC)
- err = l2cap_amp_move_reconf(sk);
- else
- err = l2cap_answer_move_poll(sk);
- }
+ case L2CAP_RX_STATE_WAIT_F:
+ err = l2cap_rx_state_wait_f(chan, control, skb, event);
break;
default:
/* shut it down */
@@ -7083,163 +6137,189 @@
}
} else {
BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
- control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ control->reqseq, chan->next_tx_seq,
+ chan->expected_ack_seq);
+ l2cap_send_disconn_req(chan, ECONNRESET);
}
return err;
}
-void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
+static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb)
{
- lock_sock(sk);
+ int err = 0;
- l2cap_pi(sk)->fixed_channel = 1;
+ BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ chan->rx_state);
- l2cap_pi(sk)->imtu = opt->imtu;
- l2cap_pi(sk)->omtu = opt->omtu;
- l2cap_pi(sk)->remote_mps = opt->omtu;
- l2cap_pi(sk)->mps = opt->omtu;
- l2cap_pi(sk)->flush_to = opt->flush_to;
- l2cap_pi(sk)->mode = opt->mode;
- l2cap_pi(sk)->fcs = opt->fcs;
- l2cap_pi(sk)->max_tx = opt->max_tx;
- l2cap_pi(sk)->remote_max_tx = opt->max_tx;
- l2cap_pi(sk)->tx_win = opt->txwin_size;
- l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
- l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+ if (l2cap_classify_txseq(chan, control->txseq) ==
+ L2CAP_TXSEQ_EXPECTED) {
+ l2cap_pass_to_tx(chan, control);
- if (opt->mode == L2CAP_MODE_ERTM ||
- l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
- l2cap_ertm_init(sk);
+ BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
+ __next_seq(chan, chan->buffer_seq));
- release_sock(sk);
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
- return;
+ l2cap_reassemble_sdu(chan, skb, control);
+ } else {
+ if (chan->sdu) {
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ }
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+ }
+
+ chan->last_acked_seq = control->txseq;
+ chan->expected_tx_seq = __next_seq(chan, control->txseq);
+
+ return err;
}
-static const u8 l2cap_ertm_rx_func_to_event[4] = {
- L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
- L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
-};
-
-int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
+static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
- struct l2cap_pinfo *pi;
- struct bt_l2cap_control *control;
+ struct l2cap_ctrl *control = &bt_cb(skb)->control;
u16 len;
u8 event;
- pi = l2cap_pi(sk);
- BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);
+ __unpack_control(chan, skb);
- if (sk->sk_state != BT_CONNECTED)
+ len = skb->len;
+
+ /*
+ * We can just drop the corrupted I-frame here.
+ * Receiver will miss it and start proper recovery
+ * procedures and ask for retransmission.
+ */
+ if (l2cap_check_fcs(chan, skb))
goto drop;
- switch (pi->mode) {
+ if (!control->sframe && control->sar == L2CAP_SAR_START)
+ len -= L2CAP_SDULEN_SIZE;
+
+ if (chan->fcs == L2CAP_FCS_CRC16)
+ len -= L2CAP_FCS_SIZE;
+
+ if (len > chan->mps) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto drop;
+ }
+
+ if (!control->sframe) {
+ int err;
+
+ BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+ control->sar, control->reqseq, control->final,
+ control->txseq);
+
+ /* Validate F-bit - F=0 always valid, F=1 only
+ * valid in TX WAIT_F
+ */
+ if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
+ goto drop;
+
+ if (chan->mode != L2CAP_MODE_STREAMING) {
+ event = L2CAP_EV_RECV_IFRAME;
+ err = l2cap_rx(chan, control, skb, event);
+ } else {
+ err = l2cap_stream_rx(chan, control, skb);
+ }
+
+ if (err)
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ } else {
+ const u8 rx_func_to_event[4] = {
+ L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
+ };
+
+ /* Only I-frames are expected in streaming mode */
+ if (chan->mode == L2CAP_MODE_STREAMING)
+ goto drop;
+
+ BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+ control->reqseq, control->final, control->poll,
+ control->super);
+
+ if (len != 0) {
+ BT_ERR("Trailing bytes: %d in sframe", len);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto drop;
+ }
+
+ /* Validate F and P bits */
+ if (control->final && (control->poll ||
+ chan->tx_state != L2CAP_TX_STATE_WAIT_F))
+ goto drop;
+
+ event = rx_func_to_event[control->super];
+ if (l2cap_rx(chan, control, skb, event))
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ }
+
+ return 0;
+
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
+{
+ struct l2cap_chan *chan;
+
+ chan = l2cap_get_chan_by_scid(conn, cid);
+ if (!chan) {
+ if (cid == L2CAP_CID_A2MP) {
+ chan = a2mp_channel_create(conn, skb);
+ if (!chan) {
+ kfree_skb(skb);
+ return;
+ }
+
+ l2cap_chan_lock(chan);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
+ if (chan->state != BT_CONNECTED)
+ goto drop;
+
+ switch (chan->mode) {
case L2CAP_MODE_BASIC:
/* If socket recv buffers overflows we drop data here
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide flow control mechanism. */
- if (pi->imtu < skb->len)
+ if (chan->imtu < skb->len)
goto drop;
- if (!sock_queue_rcv_skb(sk, skb))
+ if (!chan->ops->recv(chan, skb))
goto done;
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
- control = &bt_cb(skb)->control;
- if (pi->extended_control) {
- __get_extended_control(get_unaligned_le32(skb->data),
- control);
- skb_pull(skb, 4);
- } else {
- __get_enhanced_control(get_unaligned_le16(skb->data),
- control);
- skb_pull(skb, 2);
- }
-
- len = skb->len;
-
- if (l2cap_check_fcs(pi, skb))
- goto drop;
-
- if ((control->frame_type == 'i') &&
- (control->sar == L2CAP_SAR_START))
- len -= 2;
-
- if (pi->fcs == L2CAP_FCS_CRC16)
- len -= 2;
-
- /*
- * We can just drop the corrupted I-frame here.
- * Receiver will miss it and start proper recovery
- * procedures and ask for retransmission.
- */
- if (len > pi->mps) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- goto drop;
- }
-
- if (control->frame_type == 'i') {
-
- int err;
-
- BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
- control->sar, control->reqseq, control->final,
- control->txseq);
-
- /* Validate F-bit - F=0 always valid, F=1 only
- * valid in TX WAIT_F
- */
- if (control->final && (pi->tx_state !=
- L2CAP_ERTM_TX_STATE_WAIT_F))
- goto drop;
-
- if (pi->mode != L2CAP_MODE_STREAMING) {
- event = L2CAP_ERTM_EVENT_RECV_IFRAME;
- err = l2cap_ertm_rx(sk, control, skb, event);
- } else
- err = l2cap_strm_rx(sk, control, skb);
- if (err)
- l2cap_send_disconn_req(pi->conn, sk,
- ECONNRESET);
- } else {
- /* Only I-frames are expected in streaming mode */
- if (pi->mode == L2CAP_MODE_STREAMING)
- goto drop;
-
- BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
- control->reqseq, control->final, control->poll,
- control->super);
-
- if (len != 0) {
- l2cap_send_disconn_req(pi->conn, sk,
- ECONNRESET);
- goto drop;
- }
-
- /* Validate F and P bits */
- if (control->final &&
- ((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
- || control->poll))
- goto drop;
-
- event = l2cap_ertm_rx_func_to_event[control->super];
- if (l2cap_ertm_rx(sk, control, skb, event))
- l2cap_send_disconn_req(pi->conn, sk,
- ECONNRESET);
- }
-
+ l2cap_data_rcv(chan, skb);
goto done;
default:
- BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
+ BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
break;
}
@@ -7247,161 +6327,63 @@
kfree_skb(skb);
done:
- return 0;
+ l2cap_chan_unlock(chan);
}
-void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
+static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ struct sk_buff *skb)
{
- lock_sock(sk);
- l2cap_data_channel(sk, skb);
- release_sock(sk);
-}
+ struct l2cap_chan *chan;
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
-{
- struct sock *sk;
-
- sk = l2cap_get_sock_by_psm(0, psm, conn->src);
- if (!sk)
+ chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
+ if (!chan)
goto drop;
- bh_lock_sock(sk);
+ BT_DBG("chan %p, len %d", chan, skb->len);
- BT_DBG("sk %p, len %d", sk, skb->len);
-
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+ if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
- if (l2cap_pi(sk)->imtu < skb->len)
+ if (chan->imtu < skb->len)
goto drop;
- if (!sock_queue_rcv_skb(sk, skb))
- goto done;
+ if (!chan->ops->recv(chan, skb))
+ return;
drop:
kfree_skb(skb);
-
-done:
- if (sk)
- bh_unlock_sock(sk);
- return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid,
- struct sk_buff *skb)
+static void l2cap_att_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
{
- struct sock *sk = NULL;
- struct sk_buff *skb_rsp;
- struct l2cap_hdr *lh;
- int dir;
- struct work_struct *open_worker;
- u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
- L2CAP_ATT_NOT_SUPPORTED};
+ struct l2cap_chan *chan;
- if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
- u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
-
- skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
- GFP_ATOMIC);
- if (!skb_rsp)
- goto drop;
-
- lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(mtu_rsp));
- lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
- memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
- sizeof(mtu_rsp));
- hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
-
- goto free_skb;
- }
-
- dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1;
-
- sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src,
- conn->dst, dir);
-
- BT_DBG("sk %p, dir:%d", sk, dir);
-
- if (!sk)
+ chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA,
+ conn->src, conn->dst);
+ if (!chan)
goto drop;
- bh_lock_sock(sk);
+ BT_DBG("chan %p, len %d", chan, skb->len);
- BT_DBG("sk %p, len %d", sk, skb->len);
-
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) {
- att_chn_params.cid = cid;
- att_chn_params.conn = conn;
- att_chn_params.dir = dir;
- att_chn_params.skb = skb;
- open_worker = kzalloc(sizeof(*open_worker), GFP_ATOMIC);
- if (!open_worker)
- BT_ERR("Out of memory");
- INIT_WORK(open_worker, l2cap_queue_acl_data);
- schedule_work(open_worker);
- goto done;
- }
-
- if (l2cap_pi(sk)->imtu < skb->len)
+ if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
- if (!sock_queue_rcv_skb(sk, skb))
- goto done;
+ if (chan->imtu < skb->len)
+ goto drop;
+
+ if (!chan->ops->recv(chan, skb))
+ return;
drop:
- if (skb->data[0] != L2CAP_ATT_INDICATE)
- goto not_indicate;
-
- /* If this is an incoming Indication, we are required to confirm */
-
- skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
- if (!skb_rsp)
- goto free_skb;
-
- lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(u8));
- lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
- err_rsp[0] = L2CAP_ATT_CONFIRM;
- memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
- hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
- goto free_skb;
-
-not_indicate:
- if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
- skb->data[0] == L2CAP_ATT_CONFIRM)
- goto free_skb;
-
- /* If this is an incoming PDU that requires a response, respond with
- * a generic error so remote device doesn't hang */
-
- skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
- if (!skb_rsp)
- goto free_skb;
-
- lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(err_rsp));
- lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
- err_rsp[1] = skb->data[0];
- memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
- hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
-
-free_skb:
kfree_skb(skb);
-
-done:
- if (sk)
- bh_unlock_sock(sk);
- return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_hdr *lh = (void *) skb->data;
- struct sock *sk;
u16 cid, len;
__le16 psm;
- struct work_struct *smp_worker;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
@@ -7421,300 +6403,246 @@
break;
case L2CAP_CID_CONN_LESS:
- psm = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ psm = get_unaligned((__le16 *) skb->data);
+ skb_pull(skb, L2CAP_PSMLEN_SIZE);
l2cap_conless_channel(conn, psm, skb);
break;
case L2CAP_CID_LE_DATA:
- l2cap_att_channel(conn, cid, skb);
+ l2cap_att_channel(conn, skb);
break;
case L2CAP_CID_SMP:
- BT_DBG("get socket state");
- sk = l2cap_find_sock_by_fixed_cid_and_dir(
- L2CAP_CID_LE_DATA, conn->src, conn->dst, 1);
- if (sk) {
- BT_DBG("socket exists sk %p", sk);
- bh_lock_sock(sk);
-
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) {
- BT_DBG("socket state sk %p state %d", sk, sk->sk_state);
- smp_chn_params.cid = L2CAP_CID_LE_DATA;
- smp_chn_params.conn = conn;
- smp_chn_params.skb = skb;
- smp_worker = kzalloc(sizeof(*smp_worker), GFP_ATOMIC);
- if (!smp_worker) {
- BT_ERR("Out of memory smp_worker");
- } else {
- INIT_WORK(smp_worker, l2cap_queue_smp_data);
- BT_DBG("schedule smp_worker");
- schedule_work(smp_worker);
- }
-
- bh_unlock_sock(sk);
- goto done;
- } else {
- BT_DBG("Socket state is BT_BOUND and BT_CONNECTED ");
- bh_unlock_sock(sk);
- }
- }
-
if (smp_sig_channel(conn, skb))
- l2cap_conn_del(conn->hcon, EACCES, 0);
-
-done:
+ l2cap_conn_del(conn->hcon, EACCES);
break;
default:
- sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
- if (sk) {
- if (sock_owned_by_user(sk)) {
- BT_DBG("backlog sk %p", sk);
- if (sk_add_backlog(sk, skb))
- kfree_skb(skb);
- } else
- l2cap_data_channel(sk, skb);
-
- bh_unlock_sock(sk);
- } else if ((cid == L2CAP_CID_A2MP) && enable_hs) {
- BT_DBG("A2MP");
- amp_conn_ind(conn->hcon, skb);
- } else {
- BT_DBG("unknown cid 0x%4.4x", cid);
- kfree_skb(skb);
- }
-
+ l2cap_data_channel(conn, cid, skb);
break;
}
}
/* ---- L2CAP interface with lower layer (HCI) ---- */
-static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
int exact = 0, lm1 = 0, lm2 = 0;
- register struct sock *sk;
- struct hlist_node *node;
+ struct l2cap_chan *c;
- if (type != ACL_LINK)
- return 0;
-
- BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+ BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets and check their link_mode */
- read_lock(&l2cap_sk_list.lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- if (sk->sk_state != BT_LISTEN)
+ read_lock(&chan_list_lock);
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
+
+ if (c->state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (l2cap_pi(sk)->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (l2cap_pi(sk)->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
}
}
- read_unlock(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
return exact ? lm1 : lm2;
}
-static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn;
- BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
- if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
- return -EINVAL;
+ BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
- conn = l2cap_conn_add(hcon, status);
+ conn = l2cap_conn_add(hcon);
if (conn)
l2cap_conn_ready(conn);
- } else
- l2cap_conn_del(hcon, bt_err(status), 0);
-
- return 0;
+ } else {
+ l2cap_conn_del(hcon, bt_to_errno(status));
+ }
}
-static int l2cap_disconn_ind(struct hci_conn *hcon)
+int l2cap_disconn_ind(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p", hcon);
- if (hcon->type != ACL_LINK || !conn)
- return 0x13;
-
+ if (!conn)
+ return HCI_ERROR_REMOTE_USER_TERM;
return conn->disc_reason;
}
-static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason, u8 is_process)
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
- return -EINVAL;
-
- l2cap_conn_del(hcon, bt_err(reason), is_process);
-
- return 0;
+ l2cap_conn_del(hcon, bt_to_errno(reason));
}
-static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
+static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
return;
if (encrypt == 0x00) {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ * 5);
- } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH ||
- l2cap_pi(sk)->sec_level == BT_SECURITY_VERY_HIGH)
- __l2cap_sock_close(sk, ECONNREFUSED);
+ if (chan->sec_level == BT_SECURITY_MEDIUM) {
+ __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
+ } else if (chan->sec_level == BT_SECURITY_HIGH)
+ l2cap_chan_close(chan, ECONNREFUSED);
} else {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
- l2cap_sock_clear_timer(sk);
+ if (chan->sec_level == BT_SECURITY_MEDIUM)
+ __clear_chan_timer(chan);
}
}
-static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
- struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
- struct sock *sk;
- int smp = 0;
+ struct l2cap_chan *chan;
if (!conn)
return 0;
- l = &conn->chan_list;
+ BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
- BT_DBG("conn %p", conn);
+ if (hcon->type == LE_LINK) {
+ if (!status && encrypt)
+ smp_distribute_keys(conn, 0);
+ cancel_delayed_work(&conn->security_timer);
+ }
- read_lock(&l->lock);
+ mutex_lock(&conn->chan_lock);
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ l2cap_chan_lock(chan);
- BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);
+ BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+ state_to_string(chan->state));
- if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
- l2cap_pi(sk)->sec_level = hcon->sec_level;
- l2cap_chan_ready(sk);
+ chan->sec_level = hcon->sec_level;
+ l2cap_chan_ready(chan);
}
- smp = 1;
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
continue;
}
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
- bh_unlock_sock(sk);
+ if (!__l2cap_no_conn_pending(chan)) {
+ l2cap_chan_unlock(chan);
continue;
}
- if (!status && (sk->sk_state == BT_CONNECTED ||
- sk->sk_state == BT_CONFIG)) {
- l2cap_check_encryption(sk, encrypt);
- bh_unlock_sock(sk);
+ if (!status && (chan->state == BT_CONNECTED ||
+ chan->state == BT_CONFIG)) {
+ struct sock *sk = chan->sk;
+
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ sk->sk_state_change(sk);
+
+ l2cap_check_encryption(chan, encrypt);
+ l2cap_chan_unlock(chan);
continue;
}
- if (sk->sk_state == BT_CONNECT) {
+ if (chan->state == BT_CONNECT) {
if (!status) {
- l2cap_pi(sk)->conf_state |=
- L2CAP_CONF_CONNECT_PEND;
- if ((l2cap_pi(sk)->amp_pref ==
- BT_AMP_POLICY_PREFER_AMP) &&
- enable_hs) {
- amp_create_physical(l2cap_pi(sk)->conn,
- sk);
- } else
- l2cap_send_conn_req(sk);
+ l2cap_start_connection(chan);
} else {
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 10);
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
- } else if (sk->sk_state == BT_CONNECT2) {
+ } else if (chan->state == BT_CONNECT2) {
+ struct sock *sk = chan->sk;
struct l2cap_conn_rsp rsp;
- __u16 result;
+ __u16 res, stat;
+
+ lock_sock(sk);
if (!status) {
- if (l2cap_pi(sk)->amp_id) {
- amp_accept_physical(conn,
- l2cap_pi(sk)->amp_id, sk);
- bh_unlock_sock(sk);
- continue;
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
+ res = L2CAP_CR_PEND;
+ stat = L2CAP_CS_AUTHOR_PEND;
+ chan->ops->defer(chan);
+ } else {
+ __l2cap_state_change(chan, BT_CONFIG);
+ res = L2CAP_CR_SUCCESS;
+ stat = L2CAP_CS_NO_INFO;
}
-
- sk->sk_state = BT_CONFIG;
- result = L2CAP_CR_SUCCESS;
} else {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_set_timer(sk, HZ / 10);
- result = L2CAP_CR_SEC_BLOCK;
+ __l2cap_state_change(chan, BT_DISCONN);
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+ res = L2CAP_CR_SEC_BLOCK;
+ stat = L2CAP_CS_NO_INFO;
}
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ release_sock(sk);
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
- result == L2CAP_CR_SUCCESS) {
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = cpu_to_le16(res);
+ rsp.status = cpu_to_le16(stat);
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ res == L2CAP_CR_SUCCESS) {
char buf[128];
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf),
+ l2cap_build_conf_req(chan, buf),
buf);
- l2cap_pi(sk)->num_conf_req++;
+ chan->num_conf_req++;
}
}
- bh_unlock_sock(sk);
+ l2cap_chan_unlock(chan);
}
- read_unlock(&l->lock);
-
- if (smp) {
- del_timer(&hcon->smp_timer);
- smp_link_encrypt_cmplt(conn, status, encrypt);
- }
+ mutex_unlock(&conn->chan_lock);
return 0;
}
-static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_hdr *hdr;
+ int len;
+ /* For an AMP controller, do not create an l2cap conn */
if (!conn && hcon->hdev->dev_type != HCI_BREDR)
goto drop;
if (!conn)
- conn = l2cap_conn_add(hcon, 0);
+ conn = l2cap_conn_add(hcon);
if (!conn)
goto drop;
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
- if (flags & ACL_START) {
- struct l2cap_hdr *hdr;
- int len;
-
+ switch (flags) {
+ case ACL_START:
+ case ACL_START_NO_FLUSH:
+ case ACL_COMPLETE:
if (conn->rx_len) {
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
@@ -7739,32 +6667,26 @@
return 0;
}
- if (flags & ACL_CONT) {
- BT_ERR("Complete frame is incomplete "
- "(len %d, expected len %d)",
- skb->len, len);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
-
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
BT_ERR("Frame is too long (len %d, expected len %d)",
- skb->len, len);
+ skb->len, len);
l2cap_conn_unreliable(conn, ECOMM);
goto drop;
}
/* Allocate skb for the complete frame (with header) */
- conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
+ conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
if (!conn->rx_skb)
goto drop;
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
- skb->len);
+ skb->len);
conn->rx_len = len - skb->len;
- } else {
+ break;
+
+ case ACL_CONT:
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) {
@@ -7775,7 +6697,7 @@
if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)",
- skb->len, conn->rx_len);
+ skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
conn->rx_len = 0;
@@ -7784,7 +6706,7 @@
}
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
- skb->len);
+ skb->len);
conn->rx_len -= skb->len;
if (!conn->rx_len) {
@@ -7792,6 +6714,7 @@
l2cap_recv_frame(conn, conn->rx_skb);
conn->rx_skb = NULL;
}
+ break;
}
drop:
@@ -7799,193 +6722,27 @@
return 0;
}
-static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to)
-{
- struct hci_cp_write_automatic_flush_timeout flush_tm;
- if (hcon && hcon->hdev) {
- flush_tm.handle = hcon->handle;
- if (flush_to == L2CAP_DEFAULT_FLUSH_TO)
- flush_to = 0;
- flush_tm.timeout = (flush_to < L2CAP_MAX_FLUSH_TO) ?
- flush_to : L2CAP_MAX_FLUSH_TO;
- hci_send_cmd(hcon->hdev,
- HCI_OP_WRITE_AUTOMATIC_FLUSH_TIMEOUT,
- 4, &(flush_tm));
- }
-}
-
-static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l)
-{
- int ret_flush_to = L2CAP_DEFAULT_FLUSH_TO;
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->flush_to > 0 &&
- l2cap_pi(s)->flush_to < ret_flush_to)
- ret_flush_to = l2cap_pi(s)->flush_to;
- }
- return ret_flush_to;
-}
-
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
- struct sock *sk;
- struct hlist_node *node;
+ struct l2cap_chan *c;
- read_lock_bh(&l2cap_sk_list.lock);
+ read_lock(&chan_list_lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
- seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
- batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst),
- sk->sk_state, __le16_to_cpu(pi->psm),
- pi->scid, pi->dcid,
- pi->imtu, pi->omtu, pi->sec_level,
- pi->mode);
+ seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
+ &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ c->state, __le16_to_cpu(c->psm),
+ c->scid, c->dcid, c->imtu, c->omtu,
+ c->sec_level, c->mode);
}
- read_unlock_bh(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
return 0;
}
-static void l2cap_queue_smp_data(struct work_struct *worker)
-{
- struct sock *sk = NULL;
- struct hci_conn *hcon = NULL;
- int attempts = 0;
- __u8 reason;
-
- for (attempts = 0; attempts < 40; attempts++) {
- msleep(50);
- BT_DBG("sock state check attempt %d", attempts);
- if (!smp_chn_params.conn) {
- BT_DBG("smp_chn_params.conn is NULL");
- return;
- }
- sk = l2cap_find_sock_by_fixed_cid_and_dir(
- smp_chn_params.cid,
- smp_chn_params.conn->src,
- smp_chn_params.conn->dst, 1);
-
- if (!sk) {
- BT_DBG("sock does not exist");
- goto err;
- }
-
- bh_lock_sock(sk);
- if (sk->sk_state == BT_CONNECTED) {
- BT_DBG("sock state BT_CONNECTED");
-
- bh_unlock_sock(sk);
- if (smp_sig_channel(
- smp_chn_params.conn,
- smp_chn_params.skb))
- l2cap_conn_del(
- smp_chn_params.conn->hcon,
- EACCES, 0);
- return;
- }
- bh_unlock_sock(sk);
- }
-
-err:
- //If sock state is not connected after 40 attepmts
- //respond to the remote saying SMP_UNSPECIFIED
- hcon = smp_chn_params.conn->hcon;
- reason = SMP_UNSPECIFIED;
- BT_ERR("SMP_CMD_PAIRING_FAIL: %d", reason);
- smp_conn_security_fail(
- smp_chn_params.conn,
- SMP_CMD_PAIRING_FAIL,
- reason);
- del_timer(&hcon->smp_timer);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
- mgmt_auth_failed(hcon->hdev->id,
- smp_chn_params.conn->dst,
- reason);
- hci_conn_put(hcon);
-
- kfree_skb(smp_chn_params.skb);
- l2cap_conn_del(smp_chn_params.conn->hcon, EACCES, 0);
-}
-
-
-static void l2cap_queue_acl_data(struct work_struct *worker)
-{
- struct sock *sk = NULL;
- int attempts = 0;
- struct sk_buff *skb_rsp;
- struct l2cap_hdr *lh;
- u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
- L2CAP_ATT_NOT_SUPPORTED};
-
- for (attempts = 0; attempts < 40; attempts++) {
- msleep(50);
- if (!att_chn_params.conn) {
- BT_DBG("att_chn_params.conn is NULL");
- return;
- }
- sk = l2cap_find_sock_by_fixed_cid_and_dir
- (att_chn_params.cid,
- att_chn_params.conn->src,
- att_chn_params.conn->dst,
- att_chn_params.dir);
- bh_lock_sock(sk);
- if (sk->sk_state == BT_CONNECTED) {
- sock_queue_rcv_skb(sk, att_chn_params.skb);
- if (sk)
- bh_unlock_sock(sk);
- return;
- }
- bh_unlock_sock(sk);
- }
- bh_lock_sock(sk);
-
- if (att_chn_params.skb->data[0] != L2CAP_ATT_INDICATE)
- goto not_indicate;
-
- /* If this is an incoming Indication, we are required to confirm */
- skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
- if (!skb_rsp)
- goto free_skb;
-
- lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(u8));
- lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
- err_rsp[0] = L2CAP_ATT_CONFIRM;
- memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
- hci_send_acl(att_chn_params.conn->hcon, NULL, skb_rsp, 0);
- goto free_skb;
-
-not_indicate:
- if (att_chn_params.skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
- att_chn_params.skb->data[0] == L2CAP_ATT_CONFIRM)
- goto free_skb;
-
- /* If this is an incoming PDU that requires a response, respond with
- * a generic error so remote device doesn't hang */
-
- skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
- if (!skb_rsp)
- goto free_skb;
-
- lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(err_rsp));
- lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
- err_rsp[1] = att_chn_params.skb->data[0];
- memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
- hci_send_acl(att_chn_params.conn->hcon, NULL, skb_rsp, 0);
-
-free_skb:
- kfree_skb(att_chn_params.skb);
-
- if (sk)
- bh_unlock_sock(sk);
-
-}
-
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, l2cap_debugfs_show, inode->i_private);
@@ -8000,20 +6757,6 @@
static struct dentry *l2cap_debugfs;
-static struct hci_proto l2cap_hci_proto = {
- .name = "L2CAP",
- .id = HCI_PROTO_L2CAP,
- .connect_ind = l2cap_connect_ind,
- .connect_cfm = l2cap_connect_cfm,
- .disconn_ind = l2cap_disconn_ind,
- .disconn_cfm = l2cap_disconn_cfm,
- .security_cfm = l2cap_security_cfm,
- .recv_acldata = l2cap_recv_acldata,
- .create_cfm = l2cap_create_cfm,
- .modify_cfm = l2cap_modify_cfm,
- .destroy_cfm = l2cap_destroy_cfm,
-};
-
int __init l2cap_init(void)
{
int err;
@@ -8022,59 +6765,21 @@
if (err < 0)
return err;
- _l2cap_wq = create_singlethread_workqueue("l2cap");
- if (!_l2cap_wq) {
- err = -ENOMEM;
- goto error;
- }
-
- err = hci_register_proto(&l2cap_hci_proto);
- if (err < 0) {
- BT_ERR("L2CAP protocol registration failed");
- bt_sock_unregister(BTPROTO_L2CAP);
- goto error;
- }
-
if (bt_debugfs) {
- l2cap_debugfs = debugfs_create_file("l2cap", 0444,
- bt_debugfs, NULL, &l2cap_debugfs_fops);
+ l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
+ NULL, &l2cap_debugfs_fops);
if (!l2cap_debugfs)
BT_ERR("Failed to create L2CAP debug file");
}
- if (amp_init() < 0) {
- BT_ERR("AMP Manager initialization failed");
- goto error;
- }
-
return 0;
-
-error:
- destroy_workqueue(_l2cap_wq);
- l2cap_cleanup_sockets();
- return err;
}
void l2cap_exit(void)
{
- amp_exit();
-
debugfs_remove(l2cap_debugfs);
-
- flush_workqueue(_l2cap_wq);
- destroy_workqueue(_l2cap_wq);
-
- if (hci_unregister_proto(&l2cap_hci_proto) < 0)
- BT_ERR("L2CAP protocol unregistration failed");
-
l2cap_cleanup_sockets();
}
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable A2MP protocol");
-
-module_param(enable_reconfig, bool, 0644);
-MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index d778ae3..36fed40 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1,8 +1,9 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2011-2012 The Linux Foundation. All rights reserved.
+ Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -26,107 +27,32 @@
/* Bluetooth L2CAP sockets. */
-#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
-#include <net/bluetooth/amp.h>
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_timeout(unsigned long arg)
+static struct bt_sock_list l2cap_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
+static const struct proto_ops l2cap_sock_ops;
+static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio);
+
+bool l2cap_is_socket(struct socket *sock)
{
- struct sock *sk = (struct sock *) arg;
- int reason;
-
- BT_DBG("sock %p state %d", sk, sk->sk_state);
-
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- sock_put(sk);
- return;
- }
-
- if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
- reason = ECONNREFUSED;
- else if (sk->sk_state == BT_CONNECT &&
- l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
- reason = ECONNREFUSED;
- else
- reason = ETIMEDOUT;
-
- __l2cap_sock_close(sk, reason);
-
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
- sock_put(sk);
+ return sock && sock->ops == &l2cap_sock_ops;
}
-
-void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-void l2cap_sock_clear_timer(struct sock *sk)
-{
- BT_DBG("sock %p state %d", sk, sk->sk_state);
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
-int l2cap_sock_le_params_valid(struct bt_le_params *le_params)
-{
- if (!le_params || le_params->latency > BT_LE_LATENCY_MAX ||
- le_params->scan_window > BT_LE_SCAN_WINDOW_MAX ||
- le_params->scan_interval < BT_LE_SCAN_INTERVAL_MIN ||
- le_params->scan_window > le_params->scan_interval ||
- le_params->interval_min < BT_LE_CONN_INTERVAL_MIN ||
- le_params->interval_max > BT_LE_CONN_INTERVAL_MAX ||
- le_params->interval_min > le_params->interval_max ||
- le_params->supervision_timeout < BT_LE_SUP_TO_MIN ||
- le_params->supervision_timeout > BT_LE_SUP_TO_MAX) {
- return 0;
- }
-
- return 1;
-}
-
-int l2cap_sock_le_conn_update_params_valid(struct bt_le_params *le_params)
-{
- if (!le_params || le_params->latency > BT_LE_LATENCY_MAX ||
- le_params->interval_min < BT_LE_CONN_INTERVAL_MIN ||
- le_params->interval_max > BT_LE_CONN_INTERVAL_MAX ||
- le_params->interval_min > le_params->interval_max ||
- le_params->supervision_timeout < BT_LE_SUP_TO_MIN ||
- le_params->supervision_timeout > BT_LE_SUP_TO_MAX) {
- return 0;
- }
-
- return 1;
-}
-
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
- struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &l2cap_sk_list.head)
- if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
- goto found;
- sk = NULL;
-found:
- return sk;
-}
+EXPORT_SYMBOL(l2cap_is_socket);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
@@ -165,40 +91,37 @@
}
}
- write_lock_bh(&l2cap_sk_list.lock);
-
- if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
- l2cap_pi(sk)->sport = la.l2_psm;
- sk->sk_state = BT_BOUND;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- }
-
if (la.l2_cid)
- l2cap_pi(sk)->scid = la.l2_cid;
+ err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
+ else
+ err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
- write_unlock_bh(&l2cap_sk_list.lock);
+ if (err < 0)
+ goto done;
+
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+ __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
+ chan->sec_level = BT_SECURITY_SDP;
+
+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+
+ chan->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
return err;
}
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
- BT_DBG("sk %p type %d mode %d state %d", sk, sk->sk_type,
- l2cap_pi(sk)->mode, sk->sk_state);
+ BT_DBG("sk %p", sk);
if (!addr || alen < sizeof(addr->sa_family) ||
addr->sa_family != AF_BLUETOOTH)
@@ -211,93 +134,42 @@
if (la.l2_cid && la.l2_psm)
return -EINVAL;
+ err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
+ &la.l2_bdaddr, la.l2_bdaddr_type);
+ if (err)
+ return err;
+
lock_sock(sk);
- if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
- && !(la.l2_psm || la.l2_cid || l2cap_pi(sk)->fixed_channel)) {
- err = -EINVAL;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- goto wait;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- goto done;
- }
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
- !l2cap_pi(sk)->fixed_channel &&
- sk->sk_type != SOCK_RAW && !la.l2_cid) {
- BT_DBG("Bad PSM 0x%x", (int)__le16_to_cpu(la.l2_psm));
- err = -EINVAL;
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
- l2cap_pi(sk)->dcid = la.l2_cid;
-
- err = l2cap_do_connect(sk);
- if (err)
- goto done;
-
-wait:
err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
- if (err)
- BT_ERR("failed %d", err);
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+
release_sock(sk);
+
return err;
}
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
- switch (l2cap_pi(sk)->mode) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
@@ -310,30 +182,10 @@
goto done;
}
- if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->scid) {
- bdaddr_t *src = &bt_sk(sk)->src;
- u16 psm;
-
- err = -EINVAL;
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- for (psm = 0x1001; psm < 0x1100; psm += 2)
- if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
- l2cap_pi(sk)->psm = cpu_to_le16(psm);
- l2cap_pi(sk)->sport = cpu_to_le16(psm);
- err = 0;
- break;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
- if (err < 0)
- goto done;
- }
-
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
+ chan->state = BT_LISTEN;
sk->sk_state = BT_LISTEN;
done:
@@ -341,7 +193,8 @@
return err;
}
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
@@ -350,39 +203,39 @@
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
+ nsk = bt_accept_dequeue(sk, newsock);
+ if (nsk)
+ break;
+
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -397,32 +250,37 @@
return err;
}
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *len, int peer)
{
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
BT_DBG("sock %p, sk %p", sock, sk);
+ memset(la, 0, sizeof(struct sockaddr_l2));
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
if (peer) {
- la->l2_psm = l2cap_pi(sk)->psm;
+ la->l2_psm = chan->psm;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ la->l2_cid = cpu_to_le16(chan->dcid);
} else {
- la->l2_psm = l2cap_pi(sk)->sport;
+ la->l2_psm = chan->sport;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
+ la->l2_cid = cpu_to_le16(chan->scid);
}
return 0;
}
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
int len, err = 0;
@@ -438,13 +296,13 @@
switch (optname) {
case L2CAP_OPTIONS:
memset(&opts, 0, sizeof(opts));
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = l2cap_pi(sk)->tx_win;
+ opts.imtu = chan->imtu;
+ opts.omtu = chan->omtu;
+ opts.flush_to = chan->flush_to;
+ opts.mode = chan->mode;
+ opts.fcs = chan->fcs;
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -453,7 +311,7 @@
break;
case L2CAP_LM:
- switch (l2cap_pi(sk)->sec_level) {
+ switch (chan->sec_level) {
case BT_SECURITY_LOW:
opt = L2CAP_LM_AUTH;
break;
@@ -462,36 +320,34 @@
break;
case BT_SECURITY_HIGH:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
- L2CAP_LM_SECURE;
+ L2CAP_LM_SECURE;
break;
default:
opt = 0;
break;
}
- if (l2cap_pi(sk)->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
opt |= L2CAP_LM_MASTER;
- if (l2cap_pi(sk)->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
opt |= L2CAP_LM_RELIABLE;
- if (l2cap_pi(sk)->flushable)
- opt |= L2CAP_LM_FLUSHABLE;
-
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
- cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
- memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = chan->conn->hcon->handle;
+ memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
@@ -508,9 +364,11 @@
return err;
}
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
int len, err = 0;
@@ -530,18 +388,20 @@
switch (optname) {
case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
break;
}
memset(&sec, 0, sizeof(sec));
- sec.level = l2cap_pi(sk)->sec_level;
+ if (chan->conn) {
+ sec.level = chan->conn->hcon->sec_level;
- if (sk->sk_state == BT_CONNECTED) {
- sec.key_size = l2cap_pi(sk)->conn->hcon->enc_key_size;
- sec.level = l2cap_pi(sk)->conn->hcon->sec_level;
+ if (sk->sk_state == BT_CONNECTED)
+ sec.key_size = chan->conn->hcon->enc_key_size;
+ } else {
+ sec.level = chan->sec_level;
}
len = min_t(unsigned int, len, sizeof(sec));
@@ -556,19 +416,27 @@
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case BT_FLUSHABLE:
+ if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
case BT_POWER:
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
- pwr.force_active = l2cap_pi(sk)->force_active;
+ pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
@@ -576,19 +444,13 @@
break;
- case BT_AMP_POLICY:
- if (put_user(l2cap_pi(sk)->amp_pref, (u32 __user *) optval))
- err = -EFAULT;
- break;
-
- case BT_LE_PARAMS:
- if (l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) {
- err = -EINVAL;
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
break;
}
- if (copy_to_user(optval, (char *) &bt_sk(sk)->le_params,
- sizeof(bt_sk(sk)->le_params)))
+ if (put_user(chan->chan_policy, (u32 __user *) optval))
err = -EFAULT;
break;
@@ -601,33 +463,49 @@
return err;
}
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+{
+ switch (chan->scid) {
+ case L2CAP_CID_LE_DATA:
+ if (mtu < L2CAP_LE_MIN_MTU)
+ return false;
+ break;
+
+ default:
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ return false;
+ }
+
+ return true;
+}
+
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
- int len, le_sock, err = 0;
+ int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
- le_sock = l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA;
-
switch (optname) {
case L2CAP_OPTIONS:
- if (sk->sk_state == BT_CONNECTED && !le_sock) {
+ if (sk->sk_state == BT_CONNECTED) {
err = -EINVAL;
break;
}
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = l2cap_pi(sk)->tx_win;
+ opts.imtu = chan->imtu;
+ opts.omtu = chan->omtu;
+ opts.flush_to = chan->flush_to;
+ opts.mode = chan->mode;
+ opts.fcs = chan->fcs;
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -635,39 +513,23 @@
break;
}
- if ((opts.imtu || opts.omtu) && le_sock &&
- (sk->sk_state == BT_CONNECTED)) {
- if (opts.imtu >= L2CAP_LE_DEFAULT_MTU)
- l2cap_pi(sk)->imtu = opts.imtu;
- if (opts.omtu >= L2CAP_LE_DEFAULT_MTU)
- l2cap_pi(sk)->omtu = opts.omtu;
- if (opts.imtu < L2CAP_LE_DEFAULT_MTU ||
- opts.omtu < L2CAP_LE_DEFAULT_MTU)
- err = -EINVAL;
- break;
- }
-
- if (opts.txwin_size < 1 ||
- opts.txwin_size > L2CAP_TX_WIN_MAX_EXTENDED) {
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
break;
}
- l2cap_pi(sk)->mode = opts.mode;
- switch (l2cap_pi(sk)->mode) {
+ if (!l2cap_valid_mtu(chan, opts.imtu)) {
+ err = -EINVAL;
+ break;
+ }
+
+ chan->mode = opts.mode;
+ switch (chan->mode) {
case L2CAP_MODE_BASIC:
- l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
- break;
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm) {
- /* No fallback to ERTM or Basic mode */
- l2cap_pi(sk)->conf_state |=
- L2CAP_CONF_STATE2_DEVICE;
- break;
- }
- err = -EINVAL;
+ clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
break;
case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
@@ -676,12 +538,12 @@
break;
}
- l2cap_pi(sk)->imtu = opts.imtu;
- l2cap_pi(sk)->omtu = opts.omtu;
- l2cap_pi(sk)->fcs = opts.fcs;
- l2cap_pi(sk)->max_tx = opts.max_tx;
- l2cap_pi(sk)->tx_win = opts.txwin_size;
- l2cap_pi(sk)->flush_to = opts.flush_to;
+ chan->imtu = opts.imtu;
+ chan->omtu = opts.omtu;
+ chan->fcs = opts.fcs;
+ chan->max_tx = opts.max_tx;
+ chan->tx_win = opts.txwin_size;
+ chan->flush_to = opts.flush_to;
break;
case L2CAP_LM:
@@ -691,15 +553,21 @@
}
if (opt & L2CAP_LM_AUTH)
- l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
+ chan->sec_level = BT_SECURITY_LOW;
if (opt & L2CAP_LM_ENCRYPT)
- l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
+ chan->sec_level = BT_SECURITY_MEDIUM;
if (opt & L2CAP_LM_SECURE)
- l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+ chan->sec_level = BT_SECURITY_HIGH;
- l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
- l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
- l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE);
+ if (opt & L2CAP_LM_MASTER)
+ set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+ else
+ clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+ if (opt & L2CAP_LM_RELIABLE)
+ set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
break;
default:
@@ -711,12 +579,13 @@
return err;
}
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
- struct bt_le_params le_params;
struct l2cap_conn *conn;
int len, err = 0;
u32 opt;
@@ -733,8 +602,8 @@
switch (optname) {
case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
break;
}
@@ -748,25 +617,40 @@
}
if (sec.level < BT_SECURITY_LOW ||
- sec.level > BT_SECURITY_HIGH) {
+ sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
- l2cap_pi(sk)->sec_level = sec.level;
+ chan->sec_level = sec.level;
- conn = l2cap_pi(sk)->conn;
- if (conn && l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
+ if (!chan->conn)
+ break;
+
+ conn = chan->conn;
+
+ /* Change security for LE channels */
+ if (chan->scid == L2CAP_CID_LE_DATA) {
if (!conn->hcon->out) {
err = -EINVAL;
break;
}
- if (smp_conn_security(conn, sec.level))
+ if (smp_conn_security(conn->hcon, sec.level))
break;
-
- err = 0;
sk->sk_state = BT_CONFIG;
+ chan->state = BT_CONFIG;
+
+ /* or for ACL link */
+ } else if ((sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
+ sk->sk_state == BT_CONNECTED) {
+ if (!l2cap_chan_check_security(chan))
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ else
+ sk->sk_state_change(sk);
+ } else {
+ err = -EINVAL;
}
break;
@@ -781,46 +665,10 @@
break;
}
- bt_sk(sk)->defer_setup = opt;
- break;
-
- case BT_POWER:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- pwr.force_active = 1;
-
- len = min_t(unsigned int, sizeof(pwr), optlen);
- if (copy_from_user((char *) &pwr, optval, len)) {
- err = -EFAULT;
- break;
- }
- l2cap_pi(sk)->force_active = pwr.force_active;
- break;
-
- case BT_AMP_POLICY:
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- if ((opt > BT_AMP_POLICY_PREFER_AMP) ||
- ((l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) &&
- (l2cap_pi(sk)->mode != L2CAP_MODE_STREAMING))) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->amp_pref = (u8) opt;
- BT_DBG("BT_AMP_POLICY now %d", opt);
-
- if ((sk->sk_state == BT_CONNECTED) &&
- (l2cap_pi(sk)->amp_move_role == L2CAP_AMP_MOVE_NONE))
- l2cap_amp_move_init(sk);
-
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_FLUSHABLE:
@@ -828,44 +676,77 @@
err = -EFAULT;
break;
}
- l2cap_pi(sk)->flushable = opt;
- break;
-
- case BT_LE_PARAMS:
- if (l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) {
+ if (opt > BT_FLUSHABLE_ON) {
err = -EINVAL;
break;
}
- if (copy_from_user((char *) &le_params, optval,
- sizeof(struct bt_le_params))) {
+ if (opt == BT_FLUSHABLE_OFF) {
+ struct l2cap_conn *conn = chan->conn;
+ /* Proceed further only when we have an l2cap_conn and
+ No-Flush support in the LM */
+ if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (opt)
+ set_bit(FLAG_FLUSHABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FLUSHABLE, &chan->flags);
+ break;
+
+ case BT_POWER:
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+ len = min_t(unsigned int, sizeof(pwr), optlen);
+ if (copy_from_user((char *) &pwr, optval, len)) {
err = -EFAULT;
break;
}
- conn = l2cap_pi(sk)->conn;
- if (!conn || !conn->hcon ||
- l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) {
- memcpy(&bt_sk(sk)->le_params, &le_params,
- sizeof(le_params));
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ break;
+
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
break;
}
- if (!conn->hcon->out ||
- !l2cap_sock_le_conn_update_params_valid(
- &le_params)) {
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
err = -EINVAL;
break;
}
- memcpy(&bt_sk(sk)->le_params, &le_params, sizeof(le_params));
+ if (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING) {
+ err = -EOPNOTSUPP;
+ break;
+ }
- hci_le_conn_update(conn->hcon,
- le_params.interval_min,
- le_params.interval_max,
- le_params.latency,
- le_params.supervision_timeout);
+ chan->chan_policy = (u8) opt;
+
+ if (sk->sk_state == BT_CONNECTED &&
+ chan->move_role == L2CAP_MOVE_ROLE_NONE)
+ l2cap_move_start(chan);
+
break;
default:
@@ -877,14 +758,12 @@
return err;
}
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb;
- struct sk_buff_head seg_queue;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err;
- u8 amp_id;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -895,149 +774,31 @@
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
+ if (sk->sk_state != BT_CONNECTED)
+ return -ENOTCONN;
- if (sk->sk_state != BT_CONNECTED) {
- err = -ENOTCONN;
- goto done;
- }
+ l2cap_chan_lock(chan);
+ err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ l2cap_chan_unlock(chan);
- /* Connectionless channel */
- if (sk->sk_type == SOCK_DGRAM) {
- skb = l2cap_create_connless_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- } else {
- l2cap_do_send(sk, skb);
- err = len;
- }
- goto done;
- }
-
- switch (pi->mode) {
- case L2CAP_MODE_BASIC:
- /* Check outgoing MTU */
- if (len > pi->omtu) {
- err = -EMSGSIZE;
- goto done;
- }
-
- /* Create a basic PDU */
- skb = l2cap_create_basic_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
-
- l2cap_do_send(sk, skb);
- err = len;
- break;
-
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
-
- /* Check outgoing MTU */
- if (len > pi->omtu) {
- err = -EMSGSIZE;
- goto done;
- }
-
- __skb_queue_head_init(&seg_queue);
-
- /* Do segmentation before calling in to the state machine,
- * since it's possible to block while waiting for memory
- * allocation.
- */
- amp_id = pi->amp_id;
- err = l2cap_segment_sdu(sk, &seg_queue, msg, len, 0);
-
- /* The socket lock is released while segmenting, so check
- * that the socket is still connected
- */
- if (sk->sk_state != BT_CONNECTED) {
- __skb_queue_purge(&seg_queue);
- err = -ENOTCONN;
- }
-
- if (err) {
- BT_DBG("Error %d, sk_sndbuf %d, sk_wmem_alloc %d",
- err, sk->sk_sndbuf,
- atomic_read(&sk->sk_wmem_alloc));
- break;
- }
-
- if (pi->amp_id != amp_id) {
- /* Channel moved while unlocked. Resegment. */
- err = l2cap_resegment_queue(sk, &seg_queue);
-
- if (err)
- break;
- }
-
- if (pi->mode != L2CAP_MODE_STREAMING)
- err = l2cap_ertm_tx(sk, 0, &seg_queue,
- L2CAP_ERTM_EVENT_DATA_REQUEST);
- else
- err = l2cap_strm_tx(sk, &seg_queue);
- if (!err)
- err = len;
-
- /* If the skbs were not queued for sending, they'll still be in
- * seg_queue and need to be purged.
- */
- __skb_queue_purge(&seg_queue);
- break;
-
- default:
- BT_DBG("bad state %1.1x", pi->mode);
- err = -EBADFD;
- }
-
-done:
- release_sock(sk);
return err;
}
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
int err;
lock_sock(sk);
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
- struct l2cap_conn_rsp rsp;
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- u8 buf[128];
-
- if (l2cap_pi(sk)->amp_id) {
- /* Physical link must be brought up before connection
- * completes.
- */
- amp_accept_physical(conn, l2cap_pi(sk)->amp_id, sk);
- release_sock(sk);
- return 0;
- }
-
+ if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
+ pi->chan->state = BT_CONFIG;
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
- release_sock(sk);
- return 0;
- }
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
-
+ __l2cap_connect_rsp_defer(pi->chan);
release_sock(sk);
return 0;
}
@@ -1049,36 +810,117 @@
else
err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
- if (err >= 0)
- l2cap_ertm_recv_done(sk);
+ if (pi->chan->mode != L2CAP_MODE_ERTM)
+ return err;
+ /* Attempt to put pending rx data in the socket buffer */
+
+ lock_sock(sk);
+
+ if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+ goto done;
+
+ if (pi->rx_busy_skb) {
+ if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+ pi->rx_busy_skb = NULL;
+ else
+ goto done;
+ }
+
+ /* Restore data flow when half of the receive buffer is
+ * available. This avoids resending large numbers of
+ * frames.
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ l2cap_chan_busy(pi->chan, 0);
+
+done:
+ release_sock(sk);
return err;
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
-void l2cap_sock_kill(struct sock *sk)
+static void l2cap_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->sk_state);
+ BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
/* Kill poor orphan */
- bt_sock_unlink(&l2cap_sk_list, sk);
+
+ l2cap_chan_put(l2cap_pi(sk)->chan);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
+static int l2cap_sock_shutdown(struct socket *sock, int how)
{
- l2cap_sock_clear_timer(sk);
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan;
+ struct l2cap_conn *conn;
+ int err = 0;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ chan = l2cap_pi(sk)->chan;
+ conn = chan->conn;
+
+ if (conn)
+ mutex_lock(&conn->chan_lock);
+
+ l2cap_chan_lock(chan);
lock_sock(sk);
- __l2cap_sock_close(sk, ECONNRESET);
+
+ if (!sk->sk_shutdown) {
+ if (chan->mode == L2CAP_MODE_ERTM)
+ err = __l2cap_wait_ack(sk);
+
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ release_sock(sk);
+ l2cap_chan_close(chan, 0);
+ lock_sock(sk);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+
+ if (!err && sk->sk_err)
+ err = -sk->sk_err;
+
release_sock(sk);
+ l2cap_chan_unlock(chan);
+
+ if (conn)
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
+}
+
+static int l2cap_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ bt_sock_unlink(&l2cap_sk_list, sk);
+
+ err = l2cap_sock_shutdown(sock, 2);
+
+ sock_orphan(sk);
l2cap_sock_kill(sk);
+ return err;
}
static void l2cap_sock_cleanup_listen(struct sock *parent)
@@ -1088,214 +930,263 @@
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL)))
- l2cap_sock_close(sk);
+ while ((sk = bt_accept_dequeue(parent, NULL))) {
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- parent->sk_state = BT_CLOSED;
- sock_set_flag(parent, SOCK_ZAPPED);
-}
+ l2cap_chan_lock(chan);
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
-void __l2cap_sock_close(struct sock *sk, int reason)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
- BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
- switch (sk->sk_state) {
- case BT_LISTEN:
- l2cap_sock_cleanup_listen(sk);
- break;
-
- case BT_CONNECTED:
- case BT_CONFIG:
- if ((sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) &&
- conn->hcon->type == ACL_LINK) {
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- l2cap_send_disconn_req(conn, sk, reason);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT2:
- if ((sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) &&
- conn->hcon->type == ACL_LINK) {
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (bt_sk(sk)->defer_setup)
- result = L2CAP_CR_SEC_BLOCK;
- else
- result = L2CAP_CR_BAD_PSM;
- sk->sk_state = BT_DISCONN;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
- }
-
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT:
- case BT_DISCONN:
- l2cap_chan_del(sk, reason);
- break;
-
- default:
- sock_set_flag(sk, SOCK_ZAPPED);
- break;
+ l2cap_sock_kill(sk);
}
}
-static int l2cap_sock_shutdown(struct socket *sock, int how)
+static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
{
- struct sock *sk = sock->sk;
- int err = 0;
+ struct sock *sk, *parent = chan->data;
- BT_DBG("sock %p, sk %p", sock, sk);
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ return NULL;
+ }
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+ GFP_ATOMIC);
if (!sk)
- return 0;
+ return NULL;
+
+ bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
+ l2cap_sock_init(sk, parent);
+
+ bt_accept_enqueue(parent, sk);
+
+ return l2cap_pi(sk)->chan;
+}
+
+static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ int err;
+ struct sock *sk = chan->data;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
lock_sock(sk);
- if (!sk->sk_shutdown) {
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
- err = __l2cap_wait_ack(sk);
- l2cap_ertm_shutdown(sk);
- }
-
- sk->sk_shutdown = SHUTDOWN_MASK;
- l2cap_sock_clear_timer(sk);
- __l2cap_sock_close(sk, 0);
-
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
- err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
+ if (pi->rx_busy_skb) {
+ err = -ENOMEM;
+ goto done;
}
- if (!err && sk->sk_err)
- err = -sk->sk_err;
+ err = sock_queue_rcv_skb(sk, skb);
+
+ /* For ERTM, handle one skb that doesn't fit into the recv
+ * buffer. This is important to do because the data frames
+ * have already been acked, so the skb cannot be discarded.
+ *
+ * Notify the l2cap core that the buffer is full, so the
+ * LOCAL_BUSY state is entered and no more frames are
+ * acked and reassembled until there is buffer space
+ * available.
+ */
+ if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
+ pi->rx_busy_skb = skb;
+ l2cap_chan_busy(pi->chan, 1);
+ err = 0;
+ }
+
+done:
+ release_sock(sk);
+
+ return err;
+}
+
+static void l2cap_sock_close_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ switch (chan->state) {
+ case BT_OPEN:
+ case BT_BOUND:
+ case BT_CLOSED:
+ break;
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ break;
+ default:
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ sk->sk_err = err;
+
+ if (parent) {
+ bt_accept_unlink(sk);
+ parent->sk_data_ready(parent, 0);
+ } else {
+ sk->sk_state_change(sk);
+ }
+
+ break;
+ }
release_sock(sk);
- return err;
}
-static int l2cap_sock_release(struct socket *sock)
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
{
- struct sock *sk = sock->sk;
- struct sock *sk2 = NULL;
+ struct sock *sk = chan->data;
+
+ sk->sk_state = state;
+}
+
+static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb)
+{
+ struct sk_buff *skb;
int err;
- BT_DBG("sock %p, sk %p", sock, sk);
+ l2cap_chan_unlock(chan);
+ skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+ l2cap_chan_lock(chan);
- if (!sk)
- return 0;
+ if (!skb)
+ return ERR_PTR(err);
- /* If this is an ATT socket, find it's matching server/client */
- if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA)
- sk2 = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
- &bt_sk(sk)->src, &bt_sk(sk)->dst,
- l2cap_pi(sk)->incoming ? 0 : 1);
-
- /* If matching socket found, request tear down */
- BT_DBG("sock:%p companion:%p", sk, sk2);
- if (sk2)
- l2cap_sock_set_timer(sk2, 1);
-
- err = l2cap_sock_shutdown(sock, 2);
-
- sock_orphan(sk);
- l2cap_sock_kill(sk);
- return err;
+ return skb;
}
+static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent = bt_sk(sk)->parent;
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+}
+
+static struct l2cap_ops l2cap_chan_ops = {
+ .name = "L2CAP Socket Interface",
+ .new_connection = l2cap_sock_new_connection_cb,
+ .recv = l2cap_sock_recv_cb,
+ .close = l2cap_sock_close_cb,
+ .teardown = l2cap_sock_teardown_cb,
+ .state_change = l2cap_sock_state_change_cb,
+ .ready = l2cap_sock_ready_cb,
+ .defer = l2cap_sock_defer_cb,
+ .alloc_skb = l2cap_sock_alloc_skb_cb,
+};
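The l2cap_chan_ops table above is the seam between the socket front end and
the L2CAP core: the core keeps only the opaque chan->data cookie (set to the
struct sock in l2cap_sock_init() below) and calls back through these function
pointers. A minimal sketch of the core-side dispatch, with an invented helper
name, just to show the direction of the calls:

/* Illustrative sketch only (helper name invented): how the L2CAP core
 * hands a reassembled frame to whoever owns the channel. chan->data is
 * the struct sock * stored by l2cap_sock_init(), so the core never has
 * to know it is talking to a socket at all.
 */
static int core_deliver_frame(struct l2cap_chan *chan, struct sk_buff *skb)
{
	/* Dispatch through the registered callback; the return value
	 * tells the core whether the owner accepted the frame. */
	return chan->ops->recv(chan, skb);
}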
+
static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ if (l2cap_pi(sk)->chan)
+ l2cap_chan_put(l2cap_pi(sk)->chan);
+ if (l2cap_pi(sk)->rx_busy_skb) {
+ kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+ l2cap_pi(sk)->rx_busy_skb = NULL;
+ }
+
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
-
- l2cap_ertm_destruct(sk);
}
-static void set_default_config(struct l2cap_conf_prm *conf_prm)
-{
- conf_prm->fcs = L2CAP_FCS_CRC16;
- conf_prm->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-}
-
-void l2cap_sock_init(struct sock *sk, struct sock *parent)
+static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_chan *chan = pi->chan;
- BT_DBG("sk %p parent %p", sk, parent);
+ BT_DBG("sk %p", sk);
if (parent) {
- sk->sk_type = parent->sk_type;
- sk->sk_rcvbuf = parent->sk_rcvbuf;
- sk->sk_sndbuf = parent->sk_sndbuf;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+ struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
- pi->imtu = l2cap_pi(parent)->imtu;
- pi->omtu = l2cap_pi(parent)->omtu;
- pi->conf_state = l2cap_pi(parent)->conf_state;
- pi->mode = l2cap_pi(parent)->mode;
- pi->fcs = l2cap_pi(parent)->fcs;
- pi->max_tx = l2cap_pi(parent)->max_tx;
- pi->tx_win = l2cap_pi(parent)->tx_win;
- pi->sec_level = l2cap_pi(parent)->sec_level;
- pi->role_switch = l2cap_pi(parent)->role_switch;
- pi->force_reliable = l2cap_pi(parent)->force_reliable;
- pi->flushable = l2cap_pi(parent)->flushable;
- pi->force_active = l2cap_pi(parent)->force_active;
- pi->amp_pref = l2cap_pi(parent)->amp_pref;
+ sk->sk_type = parent->sk_type;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
+
+ chan->chan_type = pchan->chan_type;
+ chan->imtu = pchan->imtu;
+ chan->omtu = pchan->omtu;
+ chan->conf_state = pchan->conf_state;
+ chan->mode = pchan->mode;
+ chan->fcs = pchan->fcs;
+ chan->max_tx = pchan->max_tx;
+ chan->tx_win = pchan->tx_win;
+ chan->tx_win_max = pchan->tx_win_max;
+ chan->sec_level = pchan->sec_level;
+ chan->flags = pchan->flags;
+
+ security_sk_clone(parent, sk);
} else {
- pi->imtu = L2CAP_DEFAULT_MTU;
- pi->omtu = 0;
- if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
- pi->mode = L2CAP_MODE_ERTM;
- pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
- } else {
- pi->mode = L2CAP_MODE_BASIC;
+
+ switch (sk->sk_type) {
+ case SOCK_RAW:
+ chan->chan_type = L2CAP_CHAN_RAW;
+ break;
+ case SOCK_DGRAM:
+ chan->chan_type = L2CAP_CHAN_CONN_LESS;
+ break;
+ case SOCK_SEQPACKET:
+ case SOCK_STREAM:
+ chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
+ break;
}
- pi->reconf_state = L2CAP_RECONF_NONE;
- pi->max_tx = L2CAP_DEFAULT_MAX_TX;
- pi->fcs = L2CAP_FCS_CRC16;
- pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- pi->sec_level = BT_SECURITY_LOW;
- pi->role_switch = 0;
- pi->force_reliable = 0;
- pi->flushable = 0;
- pi->force_active = 1;
- pi->amp_pref = BT_AMP_POLICY_REQUIRE_BR_EDR;
+
+ chan->imtu = L2CAP_DEFAULT_MTU;
+ chan->omtu = 0;
+ if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
+ chan->mode = L2CAP_MODE_ERTM;
+ set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ } else {
+ chan->mode = L2CAP_MODE_BASIC;
+ }
+
+ l2cap_chan_set_defaults(chan);
}
/* Default config options */
- sk->sk_backlog_rcv = l2cap_data_channel;
- pi->ampcon = NULL;
- pi->ampchan = NULL;
- pi->conf_len = 0;
- pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
- pi->scid = 0;
- pi->dcid = 0;
- pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
- pi->ack_win = pi->tx_win;
- pi->extended_control = 0;
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
- pi->local_conf.fcs = pi->fcs;
- pi->local_conf.flush_to = pi->flush_to;
-
- set_default_config(&pi->remote_conf);
-
- skb_queue_head_init(TX_QUEUE(sk));
- skb_queue_head_init(SREJ_QUEUE(sk));
+ chan->data = sk;
+ chan->ops = &l2cap_chan_ops;
}
static struct proto l2cap_proto = {
@@ -1304,9 +1195,11 @@
.obj_size = sizeof(struct l2cap_pinfo)
};
-struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio)
{
struct sock *sk;
+ struct l2cap_chan *chan;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
if (!sk)
@@ -1316,16 +1209,25 @@
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+ chan = l2cap_chan_create();
+ if (!chan) {
+ sk_free(sk);
+ return NULL;
+ }
- bt_sock_link(&l2cap_sk_list, sk);
+ l2cap_chan_hold(chan);
+
+ chan->sk = sk;
+
+ l2cap_pi(sk)->chan = chan;
+
return sk;
}
@@ -1339,7 +1241,7 @@
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
- sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+ sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
@@ -1352,10 +1254,11 @@
return -ENOMEM;
l2cap_sock_init(sk, NULL);
+ bt_sock_link(&l2cap_sk_list, sk);
return 0;
}
-const struct proto_ops l2cap_sock_ops = {
+static const struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
@@ -1390,23 +1293,31 @@
return err;
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("L2CAP socket registration failed");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list,
+ NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create L2CAP proc file");
+ bt_sock_unregister(BTPROTO_L2CAP);
+ goto error;
+ }
BT_INFO("L2CAP socket layer initialized");
return 0;
error:
- BT_ERR("L2CAP socket registration failed");
proto_unregister(&l2cap_proto);
return err;
}
void l2cap_cleanup_sockets(void)
{
- if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
- BT_ERR("L2CAP socket unregistration failed");
-
+ bt_procfs_cleanup(&init_net, "l2cap");
+ bt_sock_unregister(BTPROTO_L2CAP);
proto_unregister(&l2cap_proto);
}
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b826d1b..b3fbc73 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -24,12 +24,9 @@
/* Bluetooth kernel library. */
-#include <linux/module.h>
+#define pr_fmt(fmt) "Bluetooth: " fmt
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <asm/errno.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
@@ -44,22 +41,8 @@
}
EXPORT_SYMBOL(baswap);
-char *batostr(bdaddr_t *ba)
-{
- static char str[2][18];
- static int i = 1;
-
- i ^= 1;
- sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X",
- ba->b[5], ba->b[4], ba->b[3],
- ba->b[2], ba->b[1], ba->b[0]);
-
- return str[i];
-}
-EXPORT_SYMBOL(batostr);
-
/* Bluetooth error codes to Unix errno mapping */
-int bt_err(__u16 code)
+int bt_to_errno(__u16 code)
{
switch (code) {
case 0:
@@ -149,4 +132,42 @@
return ENOSYS;
}
}
+EXPORT_SYMBOL(bt_to_errno);
+
+int bt_info(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = pr_info("%pV", &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(bt_info);
+
+int bt_err(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = pr_err("%pV", &vaf);
+
+ va_end(args);
+
+ return r;
+}
EXPORT_SYMBOL(bt_err);
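bt_info() and bt_err() above use the struct va_format / %pV printk extension,
which expands the nested format string exactly once and picks up the
"Bluetooth: " pr_fmt prefix defined at the top of lib.c. The caller-facing
BT_INFO()/BT_ERR() macros live in <net/bluetooth/bluetooth.h> and are not part
of this hunk; a hedged sketch of how such a wrapper would funnel into these
helpers:

/* Sketch only, not the actual BT_ERR() definition from the header. */
#define EXAMPLE_BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)

/* Usage: EXAMPLE_BT_ERR("hci%u setup failed", id) would print
 * "Bluetooth: hci0 setup failed" thanks to the pr_fmt prefix above.
 */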
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index dc6281e..3817728 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1,7 +1,8 @@
/*
BlueZ - Bluetooth protocol stack for Linux
+
Copyright (C) 2010 Nokia Corporation
- Copyright (c) 2011-2012 The Linux Foundation. All rights reserved.
+ Copyright (C) 2011-2012 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -23,129 +24,318 @@
/* Bluetooth HCI Management interface */
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
-#define MGMT_VERSION 0
-#define MGMT_REVISION 1
+bool enable_hs;
-#define SCAN_IDLE 0x00
-#define SCAN_LE 0x01
-#define SCAN_BR 0x02
+#define MGMT_VERSION 1
+#define MGMT_REVISION 3
+
+static const u16 mgmt_commands[] = {
+ MGMT_OP_READ_INDEX_LIST,
+ MGMT_OP_READ_INFO,
+ MGMT_OP_SET_POWERED,
+ MGMT_OP_SET_DISCOVERABLE,
+ MGMT_OP_SET_CONNECTABLE,
+ MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_OP_SET_PAIRABLE,
+ MGMT_OP_SET_LINK_SECURITY,
+ MGMT_OP_SET_SSP,
+ MGMT_OP_SET_HS,
+ MGMT_OP_SET_LE,
+ MGMT_OP_SET_DEV_CLASS,
+ MGMT_OP_SET_LOCAL_NAME,
+ MGMT_OP_ADD_UUID,
+ MGMT_OP_REMOVE_UUID,
+ MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_OP_DISCONNECT,
+ MGMT_OP_GET_CONNECTIONS,
+ MGMT_OP_PIN_CODE_REPLY,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_OP_PAIR_DEVICE,
+ MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_OP_UNPAIR_DEVICE,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ MGMT_OP_START_DISCOVERY,
+ MGMT_OP_STOP_DISCOVERY,
+ MGMT_OP_CONFIRM_NAME,
+ MGMT_OP_BLOCK_DEVICE,
+ MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_OP_SET_DEVICE_ID,
+};
+
+static const u16 mgmt_events[] = {
+ MGMT_EV_CONTROLLER_ERROR,
+ MGMT_EV_INDEX_ADDED,
+ MGMT_EV_INDEX_REMOVED,
+ MGMT_EV_NEW_SETTINGS,
+ MGMT_EV_CLASS_OF_DEV_CHANGED,
+ MGMT_EV_LOCAL_NAME_CHANGED,
+ MGMT_EV_NEW_LINK_KEY,
+ MGMT_EV_NEW_LONG_TERM_KEY,
+ MGMT_EV_DEVICE_CONNECTED,
+ MGMT_EV_DEVICE_DISCONNECTED,
+ MGMT_EV_CONNECT_FAILED,
+ MGMT_EV_PIN_CODE_REQUEST,
+ MGMT_EV_USER_CONFIRM_REQUEST,
+ MGMT_EV_USER_PASSKEY_REQUEST,
+ MGMT_EV_AUTH_FAILED,
+ MGMT_EV_DEVICE_FOUND,
+ MGMT_EV_DISCOVERING,
+ MGMT_EV_DEVICE_BLOCKED,
+ MGMT_EV_DEVICE_UNBLOCKED,
+ MGMT_EV_DEVICE_UNPAIRED,
+ MGMT_EV_PASSKEY_NOTIFY,
+};
+
+/*
+ * These LE scan and inquiry parameters were chosen according to the LE
+ * General Discovery Procedure specification.
+ */
+#define LE_SCAN_WIN 0x12
+#define LE_SCAN_INT 0x12
+#define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
+#define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)
+
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
+
+#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
+
+#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
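For reference, the raw values above line up once the Bluetooth core spec units
are applied (0.625 ms per LE scan slot, 1.28 s per inquiry-length unit):
LE_SCAN_WIN/LE_SCAN_INT 0x12 works out to 11.25 ms, INQUIRY_LEN_BREDR 0x08 to
8 x 1.28 s = 10.24 s (hence the 10240 ms LE-only timeout), and
INQUIRY_LEN_BREDR_LE 0x04 to 5.12 s, matching the 5120 ms BR/EDR+LE timeout.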
struct pending_cmd {
struct list_head list;
- __u16 opcode;
+ u16 opcode;
int index;
void *param;
struct sock *sk;
void *user_data;
};
-struct mgmt_pending_free_work {
- struct work_struct work;
- struct sock *sk;
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+ MGMT_STATUS_SUCCESS,
+ MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
+ MGMT_STATUS_NOT_CONNECTED, /* No Connection */
+ MGMT_STATUS_FAILED, /* Hardware Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
+ MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
+ MGMT_STATUS_NO_RESOURCES, /* Memory Full */
+ MGMT_STATUS_TIMEOUT, /* Connection Timeout */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
+ MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
+ MGMT_STATUS_BUSY, /* Command Disallowed */
+ MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
+ MGMT_STATUS_REJECTED, /* Rejected Security */
+ MGMT_STATUS_REJECTED, /* Rejected Personal */
+ MGMT_STATUS_TIMEOUT, /* Host Timeout */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
+ MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
+ MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
+ MGMT_STATUS_DISCONNECTED, /* OE Power Off */
+ MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
+ MGMT_STATUS_BUSY, /* Repeated Attempts */
+ MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
+ MGMT_STATUS_FAILED, /* Unknown LMP PDU */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
+ MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
+ MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
+ MGMT_STATUS_REJECTED, /* Air Mode Rejected */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
+ MGMT_STATUS_FAILED, /* Unspecified Error */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
+ MGMT_STATUS_FAILED, /* Role Change Not Allowed */
+ MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
+ MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
+ MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
+ MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
+ MGMT_STATUS_FAILED, /* Unit Link Key Used */
+ MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
+ MGMT_STATUS_TIMEOUT, /* Instant Passed */
+ MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
+ MGMT_STATUS_FAILED, /* Transaction Collision */
+ MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
+ MGMT_STATUS_REJECTED, /* QoS Rejected */
+ MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
+ MGMT_STATUS_REJECTED, /* Insufficient Security */
+ MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
+ MGMT_STATUS_BUSY, /* Role Switch Pending */
+ MGMT_STATUS_FAILED, /* Slot Violation */
+ MGMT_STATUS_FAILED, /* Role Switch Failed */
+ MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
+ MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
+ MGMT_STATUS_BUSY, /* Host Busy Pairing */
+ MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
+ MGMT_STATUS_BUSY, /* Controller Busy */
+ MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
+ MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
+ MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
};
-LIST_HEAD(cmd_list);
+bool mgmt_valid_hdev(struct hci_dev *hdev)
+{
+ return hdev->dev_type == HCI_BREDR;
+}
+
+static u8 mgmt_status(u8 hci_status)
+{
+ if (hci_status < ARRAY_SIZE(mgmt_status_table))
+ return mgmt_status_table[hci_status];
+
+ return MGMT_STATUS_FAILED;
+}
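As a worked example of the lookup: HCI status 0x05 (Authentication Failure)
is the sixth entry and maps to MGMT_STATUS_AUTH_FAILED, while any status past
the end of mgmt_status_table falls through to MGMT_STATUS_FAILED, so unknown
controller error codes degrade gracefully instead of indexing out of bounds.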
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
+ int err;
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+ hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(sizeof(*ev));
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
-static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
- size_t rp_len)
+static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ void *rp, size_t rp_len)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
+ int err;
BT_DBG("sock %p", sk);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+ hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
+ ev->status = status;
if (rp)
memcpy(ev->data, rp, rp_len);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
-static int read_version(struct sock *sk)
+static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
struct mgmt_rp_read_version rp;
BT_DBG("sock %p", sk);
rp.version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp.revision);
+ rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
- return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
- sizeof(rp));
+ return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
+ sizeof(rp));
}
-static int read_index_list(struct sock *sk)
+static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct mgmt_rp_read_commands *rp;
+ const u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ const u16 num_events = ARRAY_SIZE(mgmt_events);
+ __le16 *opcode;
+ size_t rp_size;
+ int i, err;
+
+ BT_DBG("sock %p", sk);
+
+ rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ rp->num_commands = __constant_cpu_to_le16(num_commands);
+ rp->num_events = __constant_cpu_to_le16(num_events);
+
+ for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
+ put_unaligned_le16(mgmt_commands[i], opcode);
+
+ for (i = 0; i < num_events; i++, opcode++)
+ put_unaligned_le16(mgmt_events[i], opcode);
+
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
+ rp_size);
+ kfree(rp);
+
+ return err;
+}
+
+static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
struct mgmt_rp_read_index_list *rp;
- struct list_head *p;
+ struct hci_dev *d;
size_t rp_len;
u16 count;
- int i, err;
+ int err;
BT_DBG("sock %p", sk);
read_lock(&hci_dev_list_lock);
count = 0;
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
- if (d->dev_type != HCI_BREDR)
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (!mgmt_valid_hdev(d))
continue;
+
count++;
}
@@ -156,628 +346,212 @@
return -ENOMEM;
}
- put_unaligned_le16(0, &rp->num_controllers);
-
- i = 0;
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(d);
-
- if (d->dev_type != HCI_BREDR)
+ count = 0;
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
- set_bit(HCI_MGMT, &d->flags);
-
- if (test_bit(HCI_SETUP, &d->flags))
+ if (!mgmt_valid_hdev(d))
continue;
- put_unaligned_le16(d->id, &rp->index[i++]);
- put_unaligned_le16((u16)i, &rp->num_controllers);
+ rp->index[count++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
+ rp->num_controllers = cpu_to_le16(count);
+ rp_len = sizeof(*rp) + (2 * count);
+
read_unlock(&hci_dev_list_lock);
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
- rp_len);
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
+ rp_len);
kfree(rp);
return err;
}
-static int read_controller_info(struct sock *sk, u16 index)
+static u32 get_supported_settings(struct hci_dev *hdev)
{
- struct mgmt_rp_read_info rp;
- struct hci_dev *hdev;
+ u32 settings = 0;
- BT_DBG("sock %p hci%u", sk, index);
+ settings |= MGMT_SETTING_POWERED;
+ settings |= MGMT_SETTING_PAIRABLE;
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+ if (lmp_ssp_capable(hdev))
+ settings |= MGMT_SETTING_SSP;
- hci_del_off_timer(hdev);
+ if (lmp_bredr_capable(hdev)) {
+ settings |= MGMT_SETTING_CONNECTABLE;
+ if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
+ settings |= MGMT_SETTING_FAST_CONNECTABLE;
+ settings |= MGMT_SETTING_DISCOVERABLE;
+ settings |= MGMT_SETTING_BREDR;
+ settings |= MGMT_SETTING_LINK_SECURITY;
+ }
- hci_dev_lock_bh(hdev);
+ if (enable_hs)
+ settings |= MGMT_SETTING_HS;
- set_bit(HCI_MGMT, &hdev->flags);
+ if (lmp_le_capable(hdev))
+ settings |= MGMT_SETTING_LE;
- memset(&rp, 0, sizeof(rp));
-
- rp.type = hdev->dev_type;
-
- rp.powered = test_bit(HCI_UP, &hdev->flags);
- rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
- rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
- rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
-
- if (test_bit(HCI_AUTH, &hdev->flags))
- rp.sec_mode = 3;
- else if (hdev->ssp_mode > 0)
- rp.sec_mode = 4;
- else
- rp.sec_mode = 2;
-
- bacpy(&rp.bdaddr, &hdev->bdaddr);
- memcpy(rp.features, hdev->features, 8);
- memcpy(rp.dev_class, hdev->dev_class, 3);
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
- rp.hci_ver = hdev->hci_ver;
- put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
-
- memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
-
- rp.le_white_list_size = hdev->le_white_list_size;
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+ return settings;
}
-static void mgmt_pending_free_worker(struct work_struct *work)
+static u32 get_current_settings(struct hci_dev *hdev)
{
- struct mgmt_pending_free_work *free_work =
- container_of(work, struct mgmt_pending_free_work, work);
+ u32 settings = 0;
- BT_DBG("sk %p", free_work->sk);
+ if (hdev_is_powered(hdev))
+ settings |= MGMT_SETTING_POWERED;
- sock_put(free_work->sk);
- kfree(free_work);
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_CONNECTABLE;
+
+ if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_FAST_CONNECTABLE;
+
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_DISCOVERABLE;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_PAIRABLE;
+
+ if (lmp_bredr_capable(hdev))
+ settings |= MGMT_SETTING_BREDR;
+
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_LE;
+
+ if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ settings |= MGMT_SETTING_LINK_SECURITY;
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_SSP;
+
+ if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_HS;
+
+ return settings;
}
-static void mgmt_pending_free(struct pending_cmd *cmd)
-{
- struct mgmt_pending_free_work *free_work;
- struct sock *sk = cmd->sk;
-
- BT_DBG("opcode %d, sk %p", cmd->opcode, sk);
-
- kfree(cmd->param);
- kfree(cmd);
-
- free_work = kzalloc(sizeof(*free_work), GFP_ATOMIC);
- if (free_work) {
- INIT_WORK(&free_work->work, mgmt_pending_free_worker);
- free_work->sk = sk;
-
- if (!schedule_work(&free_work->work))
- kfree(free_work);
- }
-}
-
-static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- u16 index, void *data, u16 len)
-{
- struct pending_cmd *cmd;
-
- BT_DBG("%d", opcode);
-
- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
- return NULL;
-
- cmd->opcode = opcode;
- cmd->index = index;
-
- cmd->param = kmalloc(len, GFP_ATOMIC);
- if (!cmd->param) {
- kfree(cmd);
- return NULL;
- }
-
- if (data)
- memcpy(cmd->param, data, len);
-
- cmd->sk = sk;
- sock_hold(sk);
-
- list_add(&cmd->list, &cmd_list);
-
- return cmd;
-}
-
-static void mgmt_pending_foreach(u16 opcode, int index,
- void (*cb)(struct pending_cmd *cmd, void *data),
- void *data)
-{
- struct list_head *p, *n;
-
- BT_DBG(" %d", opcode);
-
- list_for_each_safe(p, n, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
- if (opcode > 0 && cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
- continue;
-
- cb(cmd, data);
- }
-}
-
-static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
-{
- struct list_head *p;
-
- BT_DBG(" %d", opcode);
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
- continue;
-
- return cmd;
- }
-
- return NULL;
-}
-
-static void mgmt_pending_remove(struct pending_cmd *cmd)
-{
- BT_DBG(" %d", cmd->opcode);
-
- list_del(&cmd->list);
- mgmt_pending_free(cmd);
-}
-
-static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
-{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- int err, up;
-
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- up = test_bit(HCI_UP, &hdev->flags);
- if ((cp->val && up) || (!cp->val && !up)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- hci_dev_unlock_bh(hdev);
-
- if (cp->val)
- queue_work(hdev->workqueue, &hdev->power_on);
- else
- queue_work(hdev->workqueue, &hdev->power_off);
-
- err = 0;
- hci_dev_put(hdev);
-
- return err;
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
- return err;
-}
-
-static u8 get_service_classes(struct hci_dev *hdev)
-{
- struct list_head *p;
- u8 val = 0;
-
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
-
- val |= uuid->svc_hint;
- }
-
- return val;
-}
-
-static int update_class(struct hci_dev *hdev)
-{
- u8 cod[3];
- int err = 0;
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
- return 0;
-
- cod[0] = hdev->minor_class;
- cod[1] = hdev->major_class;
- cod[2] = get_service_classes(hdev);
-
- if (memcmp(cod, hdev->dev_class, 3) == 0)
- return 0;
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
-
- if (err == 0)
- memcpy(hdev->dev_class, cod, 3);
-
- return err;
-}
-
-static int set_limited_discoverable(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- struct hci_cp_write_current_iac_lap dcp;
- int update_cod;
- int err = 0;
- /* General Inquiry LAP: 0x9E8B33, Limited Inquiry LAP: 0x9E8B00 */
- u8 lap[] = { 0x33, 0x8b, 0x9e, 0x00, 0x8b, 0x9e };
-
- cp = (void *) data;
-
- BT_DBG("hci%u discoverable: %d", index, cp->val);
-
- if (!cp || len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
- ENETDOWN);
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_LIMIT_DISCOVERABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
- EBUSY);
- goto failed;
- }
-
- if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
- test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE,
- EALREADY);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LIMIT_DISCOVERABLE, index, data,
- len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- memset(&dcp, 0, sizeof(dcp));
- dcp.num_current_iac = cp->val ? 2 : 1;
- memcpy(&dcp.lap, lap, dcp.num_current_iac * 3);
- update_cod = 1;
-
- if (cp->val) {
- if (hdev->major_class & MGMT_MAJOR_CLASS_LIMITED)
- update_cod = 0;
- hdev->major_class |= MGMT_MAJOR_CLASS_LIMITED;
- } else {
- if (!(hdev->major_class & MGMT_MAJOR_CLASS_LIMITED))
- update_cod = 0;
- hdev->major_class &= ~MGMT_MAJOR_CLASS_LIMITED;
- }
-
- if (update_cod)
- err = update_class(hdev);
-
- if (err >= 0)
- err = hci_send_cmd(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
- sizeof(dcp), &dcp);
-
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- u8 scan;
- int err;
-
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
- goto failed;
- }
-
- if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
- test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- scan = SCAN_PAGE;
-
- if (cp->val)
- scan |= SCAN_INQUIRY;
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_mode *cp;
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- u8 scan;
- int err;
-
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
- goto failed;
- }
-
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
- goto failed;
- }
-
- if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- if (cp->val)
- scan = SCAN_PAGE;
- else
- scan = 0;
-
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
- struct sock *skip_sk)
-{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
-
- BT_DBG("hci%d %d", index, event);
-
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
-
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- hdr->index = cpu_to_le16(index);
- hdr->len = cpu_to_le16(data_len);
-
- if (data)
- memcpy(skb_put(skb, data_len), data, data_len);
-
- hci_send_to_sock(NULL, skb, skip_sk);
- kfree_skb(skb);
-
- return 0;
-}
-
-static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
-{
- struct mgmt_mode rp;
-
- rp.val = val;
-
- return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
-}
-
-static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_mode *cp, ev;
- struct hci_dev *hdev;
- int err;
-
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (cp->val)
- set_bit(HCI_PAIRABLE, &hdev->flags);
- else
- clear_bit(HCI_PAIRABLE, &hdev->flags);
-
- err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
- if (err < 0)
- goto failed;
-
- ev.val = cp->val;
-
- err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-#define EIR_FLAGS 0x01 /* flags */
-#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
-#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
-#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
-#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
-#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
-#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
-#define EIR_NAME_SHORT 0x08 /* shortened local name */
-#define EIR_NAME_COMPLETE 0x09 /* complete local name */
-#define EIR_TX_POWER 0x0A /* transmit power level */
-#define EIR_DEVICE_ID 0x10 /* device ID */
-
#define PNP_INFO_SVCLASS_ID 0x1200
-static u8 bluetooth_base_uuid[] = {
- 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static u16 get_uuid16(u8 *uuid128)
+static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
- u32 val;
- int i;
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
- for (i = 0; i < 12; i++) {
- if (bluetooth_base_uuid[i] != uuid128[i])
- return 0;
+ if (len < 4)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u16 uuid16;
+
+ if (uuid->size != 16)
+ continue;
+
+ uuid16 = get_unaligned_le16(&uuid->uuid[12]);
+ if (uuid16 < 0x1100)
+ continue;
+
+ if (uuid16 == PNP_INFO_SVCLASS_ID)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID16_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + sizeof(u16) > len) {
+ uuids_start[1] = EIR_UUID16_SOME;
+ break;
+ }
+
+ *ptr++ = (uuid16 & 0x00ff);
+ *ptr++ = (uuid16 & 0xff00) >> 8;
+ uuids_start[0] += sizeof(uuid16);
}
- memcpy(&val, &uuid128[12], 4);
+ return ptr;
+}
- val = le32_to_cpu(val);
- if (val > 0xffff)
- return 0;
+static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
- return (u16) val;
+ if (len < 6)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ if (uuid->size != 32)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID32_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + sizeof(u32) > len) {
+ uuids_start[1] = EIR_UUID32_SOME;
+ break;
+ }
+
+ memcpy(ptr, &uuid->uuid[12], sizeof(u32));
+ ptr += sizeof(u32);
+ uuids_start[0] += sizeof(u32);
+ }
+
+ return ptr;
+}
+
+static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
+ if (len < 18)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ if (uuid->size != 128)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID128_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + 16 > len) {
+ uuids_start[1] = EIR_UUID128_SOME;
+ break;
+ }
+
+ memcpy(ptr, uuid->uuid, 16);
+ ptr += 16;
+ uuids_start[0] += 16;
+ }
+
+ return ptr;
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
u8 *ptr = data;
- u16 eir_len = 0;
- u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
- int i, truncated = 0;
- struct list_head *p;
size_t name_len;
- name_len = strnlen(hdev->dev_name, HCI_MAX_EIR_LENGTH);
+ name_len = strlen(hdev->dev_name);
if (name_len > 0) {
/* EIR Data type */
@@ -792,109 +566,935 @@
memcpy(ptr + 2, hdev->dev_name, name_len);
- eir_len += (name_len + 2);
ptr += (name_len + 2);
}
- memset(uuid16_list, 0, sizeof(uuid16_list));
+ if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->inq_tx_power;
- /* Group all UUID16 types */
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
- u16 uuid16;
-
- uuid16 = get_uuid16(uuid->uuid);
- if (uuid16 == 0)
- return;
-
- if (uuid16 < 0x1100)
- continue;
-
- if (uuid16 == PNP_INFO_SVCLASS_ID)
- continue;
-
- /* Stop if not enough space to put next UUID */
- if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
- truncated = 1;
- break;
- }
-
- /* Check for duplicates */
- for (i = 0; uuid16_list[i] != 0; i++)
- if (uuid16_list[i] == uuid16)
- break;
-
- if (uuid16_list[i] == 0) {
- uuid16_list[i] = uuid16;
- eir_len += sizeof(u16);
- }
+ ptr += 3;
}
- if (uuid16_list[0] != 0) {
- u8 *length = ptr;
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
- /* EIR Data type */
- ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
- ptr += 2;
- eir_len += 2;
-
- for (i = 0; uuid16_list[i] != 0; i++) {
- *ptr++ = (uuid16_list[i] & 0x00ff);
- *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
- }
-
- /* EIR Data length */
- *length = (i * sizeof(u16)) + 1;
+ ptr += 10;
}
+
+ ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+ ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+ ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
-static int update_eir(struct hci_dev *hdev)
+static void update_eir(struct hci_request *req)
{
+ struct hci_dev *hdev = req->hdev;
struct hci_cp_write_eir cp;
- if (!(hdev->features[6] & LMP_EXT_INQ))
- return 0;
+ if (!hdev_is_powered(hdev))
+ return;
- if (hdev->ssp_mode == 0)
- return 0;
+ if (!lmp_ext_inq_capable(hdev))
+ return;
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
- return 0;
+ if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ return;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ return;
memset(&cp, 0, sizeof(cp));
create_eir(hdev, cp.data);
if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
- return 0;
+ return;
memcpy(hdev->eir, cp.data, sizeof(cp.data));
- return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
-static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static u8 get_service_classes(struct hci_dev *hdev)
{
- struct mgmt_cp_add_uuid *cp;
- struct hci_dev *hdev;
+ struct bt_uuid *uuid;
+ u8 val = 0;
+
+ list_for_each_entry(uuid, &hdev->uuids, list)
+ val |= uuid->svc_hint;
+
+ return val;
+}
+
+static void update_class(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hdev_is_powered(hdev))
+ return;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ return;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return;
+
+ hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static void service_cache_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ service_cache.work);
+ struct hci_request req;
+
+ if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ return;
+
+ hci_req_init(&req, hdev);
+
+ hci_dev_lock(hdev);
+
+ update_eir(&req);
+ update_class(&req);
+
+ hci_dev_unlock(hdev);
+
+ hci_req_run(&req, NULL);
+}
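service_cache_off() above is the first user of the hci_request batching
pattern this backport relies on: helpers such as update_eir() and
update_class() only queue commands with hci_req_add(), and the caller fires
the whole batch with hci_req_run(), optionally passing a completion callback
(as set_connectable() does further down). A minimal sketch, assuming the same
API; the example_*() names are invented:

/* Sketch only: batch two conditional writes and take one completion
 * callback for the whole request.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);
}

static int example_refresh(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);
	update_class(&req);	/* queues HCI_OP_WRITE_CLASS_OF_DEV if it changed */
	update_eir(&req);	/* queues HCI_OP_WRITE_EIR if it changed */
	hci_dev_unlock(hdev);

	/* example_complete() runs once the controller has answered
	 * every queued command. */
	return hci_req_run(&req, example_complete);
}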
+
+static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
+{
+ if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
+
+ INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+
+ /* Non-mgmt controlled devices get this bit set
+ * implicitly so that pairing works for them. However,
+ * for mgmt we require user-space to explicitly enable
+ * it.
+ */
+ clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+}
+
+static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_rp_read_info rp;
+
+ BT_DBG("sock %p %s", sk, hdev->name);
+
+ hci_dev_lock(hdev);
+
+ memset(&rp, 0, sizeof(rp));
+
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
+
+ rp.version = hdev->hci_ver;
+ rp.manufacturer = cpu_to_le16(hdev->manufacturer);
+
+ rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
+ rp.current_settings = cpu_to_le32(get_current_settings(hdev));
+
+ memcpy(rp.dev_class, hdev->dev_class, 3);
+
+ memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+ memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
+
+ hci_dev_unlock(hdev);
+
+ return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
+ sizeof(rp));
+}
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+ sock_put(cmd->sk);
+ kfree(cmd->param);
+ kfree(cmd);
+}
+
+static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct pending_cmd *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->opcode = opcode;
+ cmd->index = hdev->id;
+
+ cmd->param = kmalloc(len, GFP_KERNEL);
+ if (!cmd->param) {
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (data)
+ memcpy(cmd->param, data, len);
+
+ cmd->sk = sk;
+ sock_hold(sk);
+
+ list_add(&cmd->list, &hdev->mgmt_pending);
+
+ return cmd;
+}
+
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+ void (*cb)(struct pending_cmd *cmd,
+ void *data),
+ void *data)
+{
+ struct pending_cmd *cmd, *tmp;
+
+ list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
+ if (opcode > 0 && cmd->opcode != opcode)
+ continue;
+
+ cb(cmd, data);
+ }
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode == opcode)
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void mgmt_pending_remove(struct pending_cmd *cmd)
+{
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
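Taken together, the helpers above implement the life cycle of any mgmt command
that needs an asynchronous HCI round trip: refuse duplicates via
mgmt_pending_find(), park the request on hdev->mgmt_pending with
mgmt_pending_add(), and have the completion path look it up and call
mgmt_pending_remove(). The set_powered() handler below is the first real user;
a condensed, illustrative-only sketch of the same shape:

/* Sketch only; the function is not from this patch, the helper and
 * opcode names are the real ones defined above. */
static int example_async_op(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct pending_cmd *cmd;

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_BUSY);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd)
		return -ENOMEM;

	/* ...issue the HCI command here; on failure the caller must
	 * call mgmt_pending_remove(cmd) itself. */
	return 0;
}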
+
+static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+ __le32 settings = cpu_to_le32(get_current_settings(hdev));
+
+ return cmd_complete(sk, hdev->id, opcode, 0, &settings,
+ sizeof(settings));
+}
+
+static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ cancel_delayed_work(&hdev->power_off);
+
+ if (cp->val) {
+ mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
+ data, len);
+ err = mgmt_powered(hdev, 1);
+ goto failed;
+ }
+ }
+
+ if (!!cp->val == hdev_is_powered(hdev)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val)
+ queue_work(hdev->req_workqueue, &hdev->power_on);
+ else
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
+
+ err = 0;
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
+ struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(event);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
+ hdr->len = cpu_to_le16(data_len);
+
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ /* Time stamp */
+ __net_timestamp(skb);
+
+ hci_send_to_control(skb, skip_sk);
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int new_settings(struct hci_dev *hdev, struct sock *skip)
+{
+ __le32 ev;
+
+ ev = cpu_to_le32(get_current_settings(hdev));
+
+ return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
+}
+
+static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_discoverable *cp = data;
+ struct pending_cmd *cmd;
+ u16 timeout;
+ u8 scan;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ timeout = __le16_to_cpu(cp->timeout);
+ if (!cp->val && timeout > 0)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev) && timeout > 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_REJECTED);
+ goto failed;
+ }
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
+ change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ }
+
+ if (cp->val && timeout > 0) {
+ hdev->discov_timeout = timeout;
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ msecs_to_jiffies(hdev->discov_timeout * 1000));
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ scan = SCAN_PAGE;
+
+ if (cp->val)
+ scan |= SCAN_INQUIRY;
+ else
+ cancel_delayed_work(&hdev->discov_off);
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+ if (cp->val)
+ hdev->discov_timeout = timeout;
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static void write_fast_connectable(struct hci_request *req, bool enable)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_page_scan_activity acp;
+ u8 type;
+
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return;
+
+ if (enable) {
+ type = PAGE_SCAN_TYPE_INTERLACED;
+
+ /* 160 msec page scan interval */
+ acp.interval = __constant_cpu_to_le16(0x0100);
+ } else {
+ type = PAGE_SCAN_TYPE_STANDARD; /* default */
+
+ /* default 1.28 sec page scan */
+ acp.interval = __constant_cpu_to_le16(0x0800);
+ }
+
+ acp.window = __constant_cpu_to_le16(0x0012);
+
+ if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
+ __cpu_to_le16(hdev->page_scan_window) != acp.window)
+ hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ sizeof(acp), &acp);
+
+ if (hdev->page_scan_type != type)
+ hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
+}
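In baseband slots of 0.625 ms (the unit these HCI parameters are expressed
in), the values above work out as the inline comments say: interval 0x0100 is
256 x 0.625 ms = 160 ms for fast connectable, 0x0800 is 2048 x 0.625 ms =
1.28 s for the default, and the window 0x0012 is 18 x 0.625 ms = 11.25 ms in
both cases.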
+
+static void set_connectable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ u8 scan;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+
+ if (cp->val) {
+ set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ } else {
+ clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val) {
+ scan = SCAN_PAGE;
+ } else {
+ scan = 0;
+
+ if (test_bit(HCI_ISCAN, &hdev->flags) &&
+ hdev->discov_timeout > 0)
+ cancel_delayed_work(&hdev->discov_off);
+ }
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+
+ /* If we're going from non-connectable to connectable or
+ * vice-versa when fast connectable is enabled, ensure that
+ * fast connectable gets disabled. write_fast_connectable won't
+ * do anything if the page scan parameters are already what
+ * they should be.
+ */
+ if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
+ write_fast_connectable(&req, false);
+
+ err = hci_req_run(&req, set_connectable_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (cp->val)
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ else
+ clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
+ if (err < 0)
+ goto failed;
+
+ err = new_settings(hdev, sk);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 val;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags)) {
+ change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ val = !!cp->val;
+
+ if (test_bit(HCI_AUTH, &hdev->flags) == val) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 val;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_ssp_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ val = !!cp->val;
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!enable_hs)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (cp->val)
+ set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ else
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+
+ return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+}
+
+static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct hci_cp_write_le_host_supported hci_cp;
+ struct pending_cmd *cmd;
+ int err;
+ u8 val, enabled;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* LE-only devices do not allow toggling LE on/off */
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ val = !!cp->val;
+ enabled = lmp_host_le_capable(hdev);
+
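+ /* If the controller is off, or host LE support already matches, only the flag needs updating */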
+ if (!hdev_is_powered(hdev) || val == enabled) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ memset(&hci_cp, 0, sizeof(hci_cp));
+
+ if (val) {
+ hci_cp.le = val;
+ hci_cp.simul = lmp_le_br_capable(hdev);
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
+ &hci_cp);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+/* This is a helper function to test for pending mgmt commands that can
+ * cause CoD or EIR HCI commands. We can only allow one such pending
+ * mgmt command at a time since otherwise we cannot easily track what
+ * the current values are, will be, and based on that calculate if a new
+ * HCI command needs to be sent and if yes with what value.
+ */
+static bool pending_eir_or_class(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ switch (cmd->opcode) {
+ case MGMT_OP_ADD_UUID:
+ case MGMT_OP_REMOVE_UUID:
+ case MGMT_OP_SET_DEV_CLASS:
+ case MGMT_OP_SET_POWERED:
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const u8 bluetooth_base_uuid[] = {
+ 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u8 get_uuid_size(const u8 *uuid)
+{
+ u32 val;
+
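+ /* UUIDs not derived from the Bluetooth base UUID always need the full 128 bits */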
+ if (memcmp(uuid, bluetooth_base_uuid, 12))
+ return 128;
+
+ val = get_unaligned_le32(&uuid[12]);
+ if (val > 0xffff)
+ return 32;
+
+ return 16;
+}
+
+static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(mgmt_op, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
+ hdev->dev_class, 3);
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void add_uuid_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("status 0x%02x", status);
+
+ mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
+}
+
+static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_cp_add_uuid *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
struct bt_uuid *uuid;
int err;
- cp = (void *) data;
+ BT_DBG("request for %s", hdev->name);
- BT_DBG("request for hci%u", index);
+ hci_dev_lock(hdev);
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+ if (pending_eir_or_class(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+ uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
if (!uuid) {
err = -ENOMEM;
goto failed;
@@ -902,295 +1502,369 @@
memcpy(uuid->uuid, cp->uuid, 16);
uuid->svc_hint = cp->svc_hint;
+ uuid->size = get_uuid_size(cp->uuid);
- list_add(&uuid->list, &hdev->uuids);
+ list_add_tail(&uuid->list, &hdev->uuids);
- if (test_bit(HCI_UP, &hdev->flags)) {
+ hci_req_init(&req, hdev);
- err = update_class(hdev);
- if (err < 0)
+ update_class(&req);
+ update_eir(&req);
+
+ err = hci_req_run(&req, add_uuid_complete);
+ if (err < 0) {
+ if (err != -ENODATA)
goto failed;
- err = update_eir(hdev);
- if (err < 0)
- goto failed;
- } else
- err = 0;
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
+ hdev->dev_class, 3);
+ goto failed;
+ }
- err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = 0;
failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static bool enable_service_cache(struct hci_dev *hdev)
{
- struct list_head *p, *n;
- struct mgmt_cp_remove_uuid *cp;
- struct hci_dev *hdev;
+ if (!hdev_is_powered(hdev))
+ return false;
+
+ if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ queue_delayed_work(hdev->workqueue, &hdev->service_cache,
+ CACHE_TIMEOUT);
+ return true;
+ }
+
+ return false;
+}
+
+static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("status 0x%02x", status);
+
+ mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
+}
+
+static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_remove_uuid *cp = data;
+ struct pending_cmd *cmd;
+ struct bt_uuid *match, *tmp;
u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ struct hci_request req;
int err, found;
- cp = (void *) data;
+ BT_DBG("request for %s", hdev->name);
- BT_DBG("request for hci%u", index);
+ hci_dev_lock(hdev);
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
-
- hci_dev_lock_bh(hdev);
+ if (pending_eir_or_class(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
err = hci_uuids_clear(hdev);
- goto unlock;
+
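+ /* When wiping all UUIDs, let the service cache defer the class/EIR update */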
+ if (enable_service_cache(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ 0, hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ goto update_class;
}
found = 0;
- list_for_each_safe(p, n, &hdev->uuids) {
- struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
-
+ list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
if (memcmp(match->uuid, cp->uuid, 16) != 0)
continue;
list_del(&match->list);
+ kfree(match);
found++;
}
if (found == 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+ err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
- if (test_bit(HCI_UP, &hdev->flags)) {
- err = update_class(hdev);
- if (err < 0)
- goto unlock;
+update_class:
+ hci_req_init(&req, hdev);
- err = update_eir(hdev);
- if (err < 0)
- goto unlock;
- } else
- err = 0;
+ update_class(&req);
+ update_eir(&req);
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
-
-unlock:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_set_dev_class *cp;
- int err;
-
- cp = (void *) data;
-
- BT_DBG("request for hci%u", index);
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- hdev->major_class &= ~MGMT_MAJOR_CLASS_MASK;
- hdev->major_class |= cp->major & MGMT_MAJOR_CLASS_MASK;
- hdev->minor_class = cp->minor;
-
- if (test_bit(HCI_UP, &hdev->flags)) {
- err = update_class(hdev);
- if (err == 0)
- err = cmd_complete(sk, index,
- MGMT_OP_SET_DEV_CLASS, hdev->dev_class, sizeof(u8)*3);
- } else
- err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_set_service_cache *cp;
- int err;
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- BT_DBG("hci%u enable %d", index, cp->enable);
-
- if (cp->enable) {
- set_bit(HCI_SERVICE_CACHE, &hdev->flags);
- err = 0;
- } else {
- clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
- if (test_bit(HCI_UP, &hdev->flags)) {
- err = update_class(hdev);
- if (err == 0)
- err = update_eir(hdev);
- } else
- err = 0;
- }
-
- if (err == 0)
- err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
- 0);
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_load_keys *cp;
- u16 key_count, expected_len;
- int i, err;
-
- cp = (void *) data;
-
- if (len < sizeof(*cp))
- return -EINVAL;
-
- key_count = get_unaligned_le16(&cp->key_count);
-
- expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
- if (expected_len > len) {
- BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
- expected_len, len);
- return -EINVAL;
- }
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
-
- BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
- key_count);
-
- hci_dev_lock_bh(hdev);
-
- hci_link_keys_clear(hdev);
-
- set_bit(HCI_LINK_KEYS, &hdev->flags);
-
- if (cp->debug_keys)
- set_bit(HCI_DEBUG_KEYS, &hdev->flags);
- else
- clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
-
- len -= sizeof(*cp);
- i = 0;
-
- while (i < len) {
- struct mgmt_key_info *key = (void *) cp->keys + i;
-
- i += sizeof(*key);
-
- if (key->key_type == KEY_TYPE_LTK) {
- struct key_master_id *id = (void *) key->data;
-
- if (key->dlen != sizeof(struct key_master_id))
- continue;
-
- hci_add_ltk(hdev, 0, &key->bdaddr, key->addr_type,
- key->pin_len, key->auth, id->ediv,
- id->rand, key->val);
-
- continue;
- }
-
- hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->key_type,
- key->pin_len);
- }
-
- err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_remove_key *cp;
- struct hci_conn *conn;
- int err;
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- err = hci_remove_link_key(hdev, &cp->bdaddr);
+ err = hci_req_run(&req, remove_uuid_complete);
if (err < 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+ if (err != -ENODATA)
+ goto unlock;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
goto unlock;
}
err = 0;
- if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
- goto unlock;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (conn) {
- struct hci_cp_disconnect dc;
-
- put_unaligned_le16(conn->handle, &dc.handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
- err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
- }
-
unlock:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static void set_class_complete(struct hci_dev *hdev, u8 status)
{
- struct hci_dev *hdev;
- struct mgmt_cp_disconnect *cp;
+ BT_DBG("status 0x%02x", status);
+
+ mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
+}
+
+static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_dev_class *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ hci_dev_lock(hdev);
+
+ if (pending_eir_or_class(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
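+ /* The two lowest bits of minor and the three highest bits of major are reserved and must be zero */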
+ if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ hdev->major_class = cp->major;
+ hdev->minor_class = cp->minor;
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ hci_dev_unlock(hdev);
+ cancel_delayed_work_sync(&hdev->service_cache);
+ hci_dev_lock(hdev);
+ update_eir(&req);
+ }
+
+ update_class(&req);
+
+ err = hci_req_run(&req, set_class_complete);
+ if (err < 0) {
+ if (err != -ENODATA)
+ goto unlock;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = 0;
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_load_link_keys *cp = data;
+ u16 key_count, expected_len;
+ int i;
+
+ key_count = __le16_to_cpu(cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_link_key_info);
+ if (expected_len != len) {
+ BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
+ expected_len, len);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
+ key_count);
+
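+ /* Link keys are only valid for BR/EDR addresses */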
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_link_key_info *key = &cp->keys[i];
+
+ if (key->addr.type != BDADDR_BREDR)
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ hci_link_keys_clear(hdev);
+
+ set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
+
+ if (cp->debug_keys)
+ set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+ else
+ clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_link_key_info *key = &cp->keys[i];
+
+ hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
+ key->type, key->pin_len);
+ }
+
+ cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, struct sock *skip_sk)
+{
+ struct mgmt_ev_device_unpaired ev;
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = addr_type;
+
+ return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
+ skip_sk);
+}
+
+static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_unpair_device *cp = data;
+ struct mgmt_rp_unpair_device rp;
+ struct hci_cp_disconnect dc;
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
+ return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ if (cp->addr.type == BDADDR_BREDR)
+ err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
+ else
+ err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
+
+ if (err < 0) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
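+ /* Only look up the connection if the caller asked for a disconnect */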
+ if (cp->disconnect) {
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
+ &cp->addr.bdaddr);
+ } else {
+ conn = NULL;
+ }
+
+ if (!conn) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
+ &rp, sizeof(rp));
+ device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
+ sizeof(*cp));
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ dc.handle = cpu_to_le16(conn->handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_disconnect *cp = data;
+ struct mgmt_rp_disconnect rp;
struct hci_cp_disconnect dc;
struct pending_cmd *cmd;
struct hci_conn *conn;
@@ -1198,490 +1872,244 @@
BT_DBG("");
- cp = (void *) data;
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
-
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto failed;
}
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (!conn) {
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
- if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
- ENOTCONN);
- goto failed;
- }
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
+
+ if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+ goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- put_unaligned_le16(conn->handle, &dc.handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
+ dc.handle = cpu_to_le16(conn->handle);
+ dc.reason = HCI_ERROR_REMOTE_USER_TERM;
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int get_connections(struct sock *sk, u16 index)
+static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
+{
+ switch (link_type) {
+ case LE_LINK:
+ switch (addr_type) {
+ case ADDR_LE_DEV_PUBLIC:
+ return BDADDR_LE_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return BDADDR_LE_RANDOM;
+ }
+
+ default:
+ /* Fallback to BR/EDR type */
+ return BDADDR_BREDR;
+ }
+}
+
+static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
struct mgmt_rp_get_connections *rp;
- struct hci_dev *hdev;
- struct list_head *p;
+ struct hci_conn *c;
size_t rp_len;
- u16 count;
- int i, err;
+ int err;
+ u16 i;
BT_DBG("");
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+ hci_dev_lock(hdev);
- hci_dev_lock_bh(hdev);
-
- count = 0;
- list_for_each(p, &hdev->conn_hash.list) {
- count++;
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
}
- rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
- rp = kmalloc(rp_len, GFP_ATOMIC);
+ i = 0;
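+ /* First pass: count the connections known to mgmt so the response buffer can be sized */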
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
+ i++;
+ }
+
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+ rp = kmalloc(rp_len, GFP_KERNEL);
if (!rp) {
err = -ENOMEM;
goto unlock;
}
- put_unaligned_le16(count, &rp->conn_count);
-
- read_lock(&hci_dev_list_lock);
-
i = 0;
- list_for_each(p, &hdev->conn_hash.list) {
- struct hci_conn *c = list_entry(p, struct hci_conn, list);
-
- bacpy(&rp->conn[i++], &c->dst);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
+ continue;
+ bacpy(&rp->addr[i].bdaddr, &c->dst);
+ rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
+ if (c->type == SCO_LINK || c->type == ESCO_LINK)
+ continue;
+ i++;
}
- read_unlock(&hci_dev_list_lock);
+ rp->conn_count = cpu_to_le16(i);
- err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
+ /* Recalculate length in case of filtered SCO connections, etc */
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
+ rp_len);
+
+ kfree(rp);
unlock:
- kfree(rp);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ hci_dev_unlock(hdev);
return err;
}
-static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_pin_code_neg_reply *cp)
{
- struct hci_dev *hdev;
- struct mgmt_cp_pin_code_reply *cp;
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
+ sizeof(*cp));
+ if (!cmd)
+ return -ENOMEM;
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+ sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct hci_conn *conn;
+ struct mgmt_cp_pin_code_reply *cp = data;
struct hci_cp_pin_code_reply reply;
struct pending_cmd *cmd;
int err;
BT_DBG("");
- cp = (void *) data;
+ hci_dev_lock(hdev);
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_CONNECTED);
+ goto failed;
+ }
+
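+ /* High security requires a full 16 digit PIN; otherwise send a negative reply */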
+ if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
+ struct mgmt_cp_pin_code_neg_reply ncp;
+
+ memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
+
+ BT_ERR("PIN code is not 16 bytes long");
+
+ err = send_pin_code_neg_reply(sk, hdev, &ncp);
+ if (err >= 0)
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- bacpy(&reply.bdaddr, &cp->bdaddr);
+ bacpy(&reply.bdaddr, &cp->addr.bdaddr);
reply.pin_len = cp->pin_len;
- memcpy(reply.pin_code, cp->pin_code, 16);
+ memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int encrypt_link(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_encrypt_link *cp;
- struct hci_cp_set_conn_encrypt enc;
- struct hci_conn *conn;
- int err = 0;
+ struct mgmt_cp_set_io_capability *cp = data;
BT_DBG("");
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENETDOWN);
- goto done;
- }
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENOTCONN);
- goto done;
- }
-
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, EINPROGRESS);
- goto done;
- }
-
- if (conn->link_mode & HCI_LM_AUTH) {
- enc.handle = cpu_to_le16(conn->handle);
- enc.encrypt = cp->enable;
- err = hci_send_cmd(hdev,
- HCI_OP_SET_CONN_ENCRYPT, sizeof(enc), &enc);
- } else {
- conn->auth_initiator = 1;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
- struct hci_cp_auth_requested cp;
- cp.handle = cpu_to_le16(conn->handle);
- err = hci_send_cmd(conn->hdev,
- HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
- }
- }
-
-done:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-
-static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_pin_code_neg_reply *cp;
- struct pending_cmd *cmd;
- int err;
-
- BT_DBG("");
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- ENETDOWN);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
- data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
- &cp->bdaddr);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int le_add_dev_white_list(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_le_add_dev_white_list *cp;
- int err = 0;
-
- BT_DBG("");
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST,
- ENETDOWN);
- goto failed;
- }
-
- hci_le_add_dev_white_list(hdev, &cp->bdaddr);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int le_remove_dev_white_list(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_le_remove_dev_white_list *cp;
- int err = 0;
-
- BT_DBG("");
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST,
- ENETDOWN);
- goto failed;
- }
-
- hci_le_remove_dev_white_list(hdev, &cp->bdaddr);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int le_create_conn_white_list(struct sock *sk, u16 index)
-{
- struct hci_dev *hdev;
- struct hci_conn *conn;
- u8 sec_level, auth_type;
- struct pending_cmd *cmd;
- bdaddr_t bdaddr;
- int err = 0;
-
- BT_DBG("");
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LE_CREATE_CONN_WHITE_LIST,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_LE_CREATE_CONN_WHITE_LIST,
- ENETDOWN);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_LE_CREATE_CONN_WHITE_LIST, index,
- NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- sec_level = BT_SECURITY_MEDIUM;
- auth_type = HCI_AT_GENERAL_BONDING;
- memset(&bdaddr, 0, sizeof(bdaddr));
- conn = hci_le_connect(hdev, 0, BDADDR_ANY, sec_level, auth_type, NULL);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mgmt_pending_remove(cmd);
- }
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int le_cancel_create_conn_white_list(struct sock *sk, u16 index)
-{
- struct hci_dev *hdev;
- int err = 0;
-
- BT_DBG("");
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index,
- MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index,
- MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST, ENETDOWN);
- goto failed;
- }
-
- hci_le_cancel_create_connect(hdev, BDADDR_ANY);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int le_clear_white_list(struct sock *sk, u16 index)
-{
- struct hci_dev *hdev;
- int err;
-
- BT_DBG("");
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index,
- MGMT_OP_LE_CLEAR_WHITE_LIST, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index,
- MGMT_OP_LE_CLEAR_WHITE_LIST, ENETDOWN);
- goto failed;
- }
-
- err = hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_set_io_capability *cp;
-
- BT_DBG("");
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
-
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
- hdev->io_capability);
+ hdev->io_capability);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ hci_dev_unlock(hdev);
- return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
+ return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
+ 0);
}
-static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- struct list_head *p;
+ struct pending_cmd *cmd;
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
continue;
- if (cmd->index != hdev->id)
- continue;
-
if (cmd->user_data != conn)
continue;
@@ -1696,18 +2124,19 @@
struct mgmt_rp_pair_device rp;
struct hci_conn *conn = cmd->user_data;
- BT_DBG(" %u", status);
+ bacpy(&rp.addr.bdaddr, &conn->dst);
+ rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
- bacpy(&rp.bdaddr, &conn->dst);
- rp.status = status;
-
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
+ &rp, sizeof(rp));
/* So we don't get further callbacks for this connection */
conn->connect_cfm_cb = NULL;
conn->security_cfm_cb = NULL;
conn->disconn_cfm_cb = NULL;
+ hci_conn_drop(conn);
+
mgmt_pending_remove(cmd);
}
@@ -1715,797 +2144,432 @@
{
struct pending_cmd *cmd;
- BT_DBG(" %u", status);
+ BT_DBG("status %u", status);
cmd = find_pairing(conn);
- if (!cmd) {
+ if (!cmd)
BT_DBG("Unable to find a pending command");
- return;
- }
-
- pairing_complete(cmd, status);
- hci_conn_put(conn);
-}
-
-static void pairing_security_complete_cb(struct hci_conn *conn, u8 status)
-{
- struct pending_cmd *cmd;
-
- BT_DBG(" %u", status);
-
- cmd = find_pairing(conn);
- if (!cmd) {
- BT_DBG("Unable to find a pending command");
- return;
- }
-
- if (conn->type == LE_LINK)
- smp_link_encrypt_cmplt(conn->l2cap_data, status,
- status ? 0 : 1);
else
- pairing_complete(cmd, status);
+ pairing_complete(cmd, mgmt_status(status));
}
-static void pairing_connect_complete_cb(struct hci_conn *conn, u8 status)
+static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
struct pending_cmd *cmd;
- BT_DBG("conn: %p %u", conn, status);
+ BT_DBG("status %u", status);
+
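+ /* A successful connection alone does not finish LE pairing; only report failures here */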
+ if (!status)
+ return;
cmd = find_pairing(conn);
- if (!cmd) {
+ if (!cmd)
BT_DBG("Unable to find a pending command");
- return;
- }
-
- if (status || conn->pending_sec_level < BT_SECURITY_MEDIUM)
- pairing_complete(cmd, status);
-
- hci_conn_put(conn);
+ else
+ pairing_complete(cmd, mgmt_status(status));
}
-static void discovery_terminated(struct pending_cmd *cmd, void *data)
+static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_mode ev = {0};
-
- BT_DBG("");
- hdev = hci_dev_get(cmd->index);
- if (!hdev)
- goto not_found;
-
- del_timer(&hdev->disco_le_timer);
- del_timer(&hdev->disco_timer);
- hci_dev_put(hdev);
-
-not_found:
- mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);
-
- list_del(&cmd->list);
-
- mgmt_pending_free(cmd);
-}
-
-static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_pair_device *cp;
+ struct mgmt_cp_pair_device *cp = data;
+ struct mgmt_rp_pair_device rp;
struct pending_cmd *cmd;
- u8 sec_level, auth_type, io_cap;
+ u8 sec_level, auth_type;
struct hci_conn *conn;
- struct adv_entry *entry;
int err;
BT_DBG("");
- cp = (void *) data;
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
- hdev = hci_dev_get(index);
+ hci_dev_lock(hdev);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- io_cap = cp->io_cap;
-
- sec_level = BT_SECURITY_MEDIUM;
- auth_type = HCI_AT_DEDICATED_BONDING;
-
- entry = hci_find_adv_entry(hdev, &cp->bdaddr);
- if (entry && entry->flags & 0x04) {
- conn = hci_le_connect(hdev, 0, &cp->bdaddr, sec_level,
- auth_type, NULL);
- } else {
- /* ACL-SSP does not support io_cap 0x04 (KeyboadDisplay) */
- if (io_cap == 0x04)
- io_cap = 0x01;
- conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level,
- auth_type);
- conn->auth_initiator = 1;
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto unlock;
}
+ sec_level = BT_SECURITY_MEDIUM;
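+ /* NoInputNoOutput (0x03) cannot provide MITM protection */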
+ if (cp->io_cap == 0x03)
+ auth_type = HCI_AT_DEDICATED_BONDING;
+ else
+ auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, 0, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
+ else
+ conn = hci_connect(hdev, LE_LINK, 0, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
+
if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
+ int status;
+
+ if (PTR_ERR(conn) == -EBUSY)
+ status = MGMT_STATUS_BUSY;
+ else
+ status = MGMT_STATUS_CONNECT_FAILED;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ status, &rp,
+ sizeof(rp));
goto unlock;
}
if (conn->connect_cfm_cb) {
- hci_conn_put(conn);
- err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+ hci_conn_drop(conn);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
- hci_conn_put(conn);
+ hci_conn_drop(conn);
goto unlock;
}
- conn->connect_cfm_cb = pairing_connect_complete_cb;
- conn->security_cfm_cb = pairing_security_complete_cb;
+ /* For LE, just connecting isn't a proof that the pairing finished */
+ if (cp->addr.type == BDADDR_BREDR)
+ conn->connect_cfm_cb = pairing_complete_cb;
+ else
+ conn->connect_cfm_cb = le_connect_complete_cb;
+
+ conn->security_cfm_cb = pairing_complete_cb;
conn->disconn_cfm_cb = pairing_complete_cb;
- conn->io_capability = io_cap;
+ conn->io_capability = cp->io_cap;
cmd->user_data = conn;
if (conn->state == BT_CONNECTED &&
- hci_conn_security(conn, sec_level, auth_type))
+ hci_conn_security(conn, sec_level, auth_type))
pairing_complete(cmd, 0);
err = 0;
unlock:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len, u16 opcode)
+static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct mgmt_cp_user_confirm_reply *cp = (void *) data;
- u16 mgmt_op = opcode, hci_op;
+ struct mgmt_addr_info *addr = data;
struct pending_cmd *cmd;
- struct hci_dev *hdev;
- struct hci_conn *le_conn;
+ struct hci_conn *conn;
int err;
- BT_DBG("%d", mgmt_op);
+ BT_DBG("");
- if (mgmt_op == MGMT_OP_USER_CONFIRM_NEG_REPLY)
- hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
+ if (!cmd) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ conn = cmd->user_data;
+
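+ /* The given address must match the device of the pending pairing */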
+ if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ pairing_complete(cmd, MGMT_STATUS_CANCELLED);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
+ addr, sizeof(*addr));
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_addr_info *addr, u16 mgmt_op,
+ u16 hci_op, __le32 passkey)
+{
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_POWERED, addr,
+ sizeof(*addr));
+ goto done;
+ }
+
+ if (addr->type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
else
- hci_op = HCI_OP_USER_CONFIRM_REPLY;
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
- if (len < sizeof(*cp))
- return cmd_status(sk, index, mgmt_op, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, mgmt_op, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, mgmt_op, ENETDOWN);
+ if (!conn) {
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_CONNECTED, addr,
+ sizeof(*addr));
goto done;
}
- le_conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
- if (le_conn) {
- err = le_user_confirm_reply(le_conn, mgmt_op, (void *) cp);
- goto done;
- }
- BT_DBG("BR/EDR: %s", mgmt_op == MGMT_OP_USER_CONFIRM_NEG_REPLY ?
- "Reject" : "Accept");
+ if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
+ /* Continue with pairing via SMP */
+ err = smp_user_confirm_reply(conn, mgmt_op, passkey);
- cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto done;
- }
-
- err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-done:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int resolve_name(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_cp_resolve_name *mgmt_cp = (void *) data;
- struct hci_cp_remote_name_req hci_cp;
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- int err;
-
- BT_DBG("");
-
- if (len != sizeof(*mgmt_cp))
- return cmd_status(sk, index, MGMT_OP_RESOLVE_NAME, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_RESOLVE_NAME, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_RESOLVE_NAME, index, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- memset(&hci_cp, 0, sizeof(hci_cp));
- bacpy(&hci_cp.bdaddr, &mgmt_cp->bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(hci_cp),
- &hci_cp);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int cancel_resolve_name(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_cp_cancel_resolve_name *mgmt_cp = (void *) data;
- struct hci_cp_remote_name_req_cancel hci_cp;
- struct hci_dev *hdev;
- int err;
-
- BT_DBG("");
-
- if (len != sizeof(*mgmt_cp))
- return cmd_status(sk, index, MGMT_OP_CANCEL_RESOLVE_NAME,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_CANCEL_RESOLVE_NAME,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- memset(&hci_cp, 0, sizeof(hci_cp));
- bacpy(&hci_cp.bdaddr, &mgmt_cp->bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(hci_cp),
- &hci_cp);
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_connection_params(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct mgmt_cp_set_connection_params *cp = (void *) data;
- struct hci_dev *hdev;
- struct hci_conn *conn;
- int err;
-
- BT_DBG("");
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
- if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS,
- ENOTCONN);
- goto failed;
- }
-
- hci_le_conn_update(conn, le16_to_cpu(cp->interval_min),
- le16_to_cpu(cp->interval_max),
- le16_to_cpu(cp->slave_latency),
- le16_to_cpu(cp->timeout_multiplier));
-
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS, 0);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_rssi_reporter(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct mgmt_cp_set_rssi_reporter *cp = (void *) data;
- struct hci_dev *hdev;
- struct hci_conn *conn;
- int err = 0;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
-
- if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER,
- ENOTCONN);
- goto failed;
- }
-
- BT_DBG("updateOnThreshExceed %d ", cp->updateOnThreshExceed);
- hci_conn_set_rssi_reporter(conn, cp->rssi_threshold,
- __le16_to_cpu(cp->interval), cp->updateOnThreshExceed);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int unset_rssi_reporter(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct mgmt_cp_unset_rssi_reporter *cp = (void *) data;
- struct hci_dev *hdev;
- struct hci_conn *conn;
- int err = 0;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER,
- EINVAL);
-
- hdev = hci_dev_get(index);
-
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
-
- if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER,
- ENOTCONN);
- goto failed;
- }
-
- hci_conn_unset_rssi_reporter(conn);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int le_cancel_create_conn(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
-{
- struct mgmt_cp_le_cancel_create_conn *cp = (void *) data;
- struct hci_dev *hdev;
- int err = 0;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN,
- EINVAL);
-
- hdev = hci_dev_get(index);
-
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN,
- ENETDOWN);
- goto failed;
- }
-
- hci_le_cancel_create_connect(hdev, &cp->bdaddr);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
-return err;
-}
-
-static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
- struct hci_cp_write_local_name hci_cp;
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- int err;
-
- BT_DBG("");
-
- if (len != sizeof(*mgmt_cp))
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
- err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
- &hci_cp);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static void discovery_rsp(struct pending_cmd *cmd, void *data)
-{
- struct mgmt_mode ev;
-
- BT_DBG("");
- if (cmd->opcode == MGMT_OP_START_DISCOVERY) {
- ev.val = 1;
- cmd_status(cmd->sk, cmd->index, MGMT_OP_START_DISCOVERY, 0);
- } else {
- ev.val = 0;
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_STOP_DISCOVERY,
- NULL, 0);
- if (cmd->opcode == MGMT_OP_STOP_DISCOVERY) {
- struct hci_dev *hdev = hci_dev_get(cmd->index);
- if (hdev) {
- del_timer(&hdev->disco_le_timer);
- del_timer(&hdev->disco_timer);
- hci_dev_put(hdev);
- }
- }
- }
-
- mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);
-
- list_del(&cmd->list);
-
- mgmt_pending_free(cmd);
-}
-
-void mgmt_inquiry_started(u16 index)
-{
- BT_DBG("");
- mgmt_pending_foreach(MGMT_OP_START_DISCOVERY, index,
- discovery_rsp, NULL);
-}
-
-void mgmt_inquiry_complete_evt(u16 index, u8 status)
-{
- struct hci_dev *hdev;
- struct hci_cp_le_set_scan_enable le_cp = {1, 0};
- struct mgmt_mode cp = {0};
- int err = -1;
-
- hdev = hci_dev_get(index);
-
- if (hdev)
- BT_DBG("disco_state: %d", hdev->disco_state);
-
- if (!hdev || !lmp_le_capable(hdev)) {
-
- mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
- discovery_terminated, NULL);
-
- mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);
-
- hdev->disco_state = SCAN_IDLE;
-
- if (hdev)
- goto done;
+ if (!err)
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_SUCCESS, addr,
+ sizeof(*addr));
else
- return;
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_FAILED, addr,
+ sizeof(*addr));
+
+ goto done;
}
- if (hdev->disco_state != SCAN_IDLE) {
- err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
- sizeof(le_cp), &le_cp);
- if (err >= 0) {
- mod_timer(&hdev->disco_le_timer, jiffies +
- msecs_to_jiffies(hdev->disco_int_phase * 1000));
- hdev->disco_state = SCAN_LE;
- } else
- hdev->disco_state = SCAN_IDLE;
+ cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
+ if (!cmd) {
+ err = -ENOMEM;
+ goto done;
}
- if (hdev->disco_state == SCAN_IDLE)
- mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);
+ /* Continue with pairing via HCI */
+ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+ struct hci_cp_user_passkey_reply cp;
+
+ bacpy(&cp.bdaddr, &addr->bdaddr);
+ cp.passkey = passkey;
+ err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+ } else
+ err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
+ &addr->bdaddr);
if (err < 0)
- mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
- discovery_terminated, NULL);
-
-done:
- hci_dev_put(hdev);
-}
-
-void mgmt_disco_timeout(unsigned long data)
-{
- struct hci_dev *hdev = (void *) data;
- struct pending_cmd *cmd;
- struct mgmt_mode cp = {0};
-
- BT_DBG("hci%d", hdev->id);
-
- hdev = hci_dev_get(hdev->id);
-
- if (!hdev)
- return;
-
- hci_dev_lock_bh(hdev);
- del_timer(&hdev->disco_le_timer);
-
- if (hdev->disco_state != SCAN_IDLE) {
- struct hci_cp_le_set_scan_enable le_cp = {0, 0};
-
- if (test_bit(HCI_UP, &hdev->flags)) {
- if (hdev->disco_state == SCAN_LE)
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
- sizeof(le_cp), &le_cp);
- else
- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0,
- NULL);
- }
- hdev->disco_state = SCAN_IDLE;
- }
-
- mgmt_event(MGMT_EV_DISCOVERING, hdev->id, &cp, sizeof(cp), NULL);
-
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev->id);
- if (cmd)
mgmt_pending_remove(cmd);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+done:
+ hci_dev_unlock(hdev);
+ return err;
}
-void mgmt_disco_le_timeout(unsigned long data)
+static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev = (void *)data;
- struct hci_cp_le_set_scan_enable le_cp = {0, 0};
+ struct mgmt_cp_pin_code_neg_reply *cp = data;
- BT_DBG("hci%d", hdev->id);
+ BT_DBG("");
- hdev = hci_dev_get(hdev->id);
-
- if (!hdev)
- return;
-
- hci_dev_lock_bh(hdev);
-
- if (test_bit(HCI_UP, &hdev->flags)) {
- if (hdev->disco_state == SCAN_LE)
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
- sizeof(le_cp), &le_cp);
-
- /* re-start BR scan */
- if (hdev->disco_state != SCAN_IDLE) {
- struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 4, 0};
- hdev->disco_int_phase *= 2;
- hdev->disco_int_count = 0;
- cp.num_rsp = (u8) hdev->disco_int_phase;
- hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
- hdev->disco_state = SCAN_BR;
- }
- }
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
-static int start_discovery(struct sock *sk, u16 index)
+static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
{
- struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 8, 0};
- struct hci_dev *hdev;
+ struct mgmt_cp_user_confirm_reply *cp = data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_user_confirm_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_passkey_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_user_passkey_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
+static void update_name(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_local_name cp;
+
+ memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+
+ hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
+}
+
+static void set_name_complete(struct hci_dev *hdev, u8 status)
+{
+ struct mgmt_cp_set_local_name *cp;
struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cp = cmd->param;
+
+ if (status)
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ mgmt_status(status));
+ else
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ cp, sizeof(*cp));
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_local_name *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
int err;
BT_DBG("");
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+ hci_dev_lock(hdev);
- BT_DBG("disco_state: %d", hdev->disco_state);
- hci_dev_lock_bh(hdev);
-
- if (hdev->disco_state && timer_pending(&hdev->disco_timer)) {
- err = -EBUSY;
+ /* If the old values are the same as the new ones just return a
+ * direct command complete event.
+ */
+ if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
+ !memcmp(hdev->short_name, cp->short_name,
+ sizeof(hdev->short_name))) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+ memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
+
+ if (!hdev_is_powered(hdev)) {
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
+ if (err < 0)
+ goto failed;
+
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
+ sk);
+
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- /* If LE Capable, we will alternate between BR/EDR and LE */
- if (lmp_le_capable(hdev)) {
- struct hci_cp_le_set_scan_parameters le_cp;
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
- /* Shorten BR scan params */
- cp.num_rsp = 1;
- cp.length /= 2;
+ hci_req_init(&req, hdev);
- /* Setup LE scan params */
- memset(&le_cp, 0, sizeof(le_cp));
- le_cp.type = 0x01; /* Active scanning */
- /* The recommended value for scan interval and window is
- * 11.25 msec. It is calculated by: time = n * 0.625 msec */
- le_cp.interval = cpu_to_le16(0x0012);
- le_cp.window = cpu_to_le16(0x0012);
- le_cp.own_bdaddr_type = 0; /* Public address */
- le_cp.filter = 0; /* Accept all adv packets */
-
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAMETERS,
- sizeof(le_cp), &le_cp);
+ if (lmp_bredr_capable(hdev)) {
+ update_name(&req);
+ update_eir(&req);
}
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ if (lmp_le_capable(hdev))
+ hci_update_ad(&req);
- if (err < 0) {
+ err = hci_req_run(&req, set_name_complete);
+ if (err < 0)
mgmt_pending_remove(cmd);
- hdev->disco_state = SCAN_IDLE;
- } else if (lmp_le_capable(hdev)) {
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
- if (!cmd)
- mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index,
- NULL, 0);
- hdev->disco_int_phase = 1;
- hdev->disco_int_count = 0;
- hdev->disco_state = SCAN_BR;
- del_timer(&hdev->disco_le_timer);
- del_timer(&hdev->disco_timer);
- mod_timer(&hdev->disco_timer,
- jiffies + msecs_to_jiffies(20000));
- } else
- hdev->disco_state = SCAN_BR;
failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- if (err < 0)
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, -err);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int stop_discovery(struct sock *sk, u16 index)
+static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
{
- struct hci_cp_le_set_scan_enable le_cp = {0, 0};
- struct mgmt_mode mode_cp = {0};
- struct hci_dev *hdev;
- struct pending_cmd *cmd = NULL;
- int err = -EPERM;
- u8 state;
-
- BT_DBG("");
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
-
- BT_DBG("disco_state: %d", hdev->disco_state);
-
- hci_dev_lock_bh(hdev);
-
- state = hdev->disco_state;
- hdev->disco_state = SCAN_IDLE;
- del_timer(&hdev->disco_le_timer);
- del_timer(&hdev->disco_timer);
-
- if (state == SCAN_LE) {
- err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
- sizeof(le_cp), &le_cp);
- if (err >= 0) {
- mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
- discovery_terminated, NULL);
-
- err = cmd_complete(sk, index, MGMT_OP_STOP_DISCOVERY,
- NULL, 0);
- }
- } else if (state == SCAN_BR)
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
- if (err < 0 && cmd)
- mgmt_pending_remove(cmd);
-
- mgmt_event(MGMT_EV_DISCOVERING, index, &mode_cp, sizeof(mode_cp), NULL);
-
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- if (err < 0)
- return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, -err);
- else
- return err;
-}
-
-static int read_local_oob_data(struct sock *sk, u16 index)
-{
- struct hci_dev *hdev;
struct pending_cmd *cmd;
int err;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- ENODEV);
+ hci_dev_lock(hdev);
- hci_dev_lock_bh(hdev);
-
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- ENETDOWN);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
- if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EOPNOTSUPP);
+ if (!lmp_ssp_capable(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_BUSY);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -2516,85 +2580,598 @@
mgmt_pending_remove(cmd);
unlock:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
+ hci_dev_unlock(hdev);
return err;
}
-static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
+static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
+ struct mgmt_cp_add_remote_oob_data *cp = data;
+ u8 status;
int err;
- BT_DBG("hci%u ", index);
+ BT_DBG("%s ", hdev->name);
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- EINVAL);
+ hci_dev_lock(hdev);
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
- cp->randomizer);
+ err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
+ cp->randomizer);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+ status = MGMT_STATUS_FAILED;
else
- err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
- 0);
+ status = MGMT_STATUS_SUCCESS;
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
+ &cp->addr, sizeof(cp->addr));
+ hci_dev_unlock(hdev);
return err;
}
-static int remove_remote_oob_data(struct sock *sk, u16 index,
- unsigned char *data, u16 len)
+static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
{
- struct hci_dev *hdev;
- struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
+ struct mgmt_cp_remove_remote_oob_data *cp = data;
+ u8 status;
int err;
- BT_DBG("hci%u ", index);
+ BT_DBG("%s", hdev->name);
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- EINVAL);
+ hci_dev_lock(hdev);
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
+ err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- -err);
+ status = MGMT_STATUS_INVALID_PARAMS;
else
- err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- NULL, 0);
+ status = MGMT_STATUS_SUCCESS;
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
+
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+int mgmt_interleaved_discovery(struct hci_dev *hdev)
+{
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
+ if (err < 0)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+ hci_dev_unlock(hdev);
return err;
}
+static int start_discovery(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_start_discovery *cp = data;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (hdev->discovery.state != DISCOVERY_STOPPED) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ hdev->discovery.type = cp->type;
+
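+	/* Each discovery type maps to a different HCI operation: a BR/EDR
+	 * inquiry, an active LE scan, or an interleaved scan that covers
+	 * both transports.
+	 */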
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ if (!lmp_bredr_capable(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_SUPPORTED);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
+ break;
+
+ case DISCOV_TYPE_LE:
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_SUPPORTED);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
+ LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
+ break;
+
+ case DISCOV_TYPE_INTERLEAVED:
+ if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_SUPPORTED);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
+ LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
+ break;
+
+ default:
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_stop_discovery *mgmt_cp = data;
+ struct pending_cmd *cmd;
+ struct hci_cp_remote_name_req_cancel cp;
+ struct inquiry_entry *e;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hci_discovery_active(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_REJECTED, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ goto unlock;
+ }
+
+ if (hdev->discovery.type != mgmt_cp->type) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
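+	/* Abort whichever stage discovery is in: cancel the inquiry or LE
+	 * scan while still finding devices, or cancel the outstanding
+	 * remote name request while resolving names.
+	 */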
+ switch (hdev->discovery.state) {
+ case DISCOVERY_FINDING:
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ err = hci_cancel_inquiry(hdev);
+ else
+ err = hci_cancel_le_scan(hdev);
+
+ break;
+
+ case DISCOVERY_RESOLVING:
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e) {
+ mgmt_pending_remove(cmd);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp);
+
+ break;
+
+ default:
+ BT_DBG("unknown discovery state %u", hdev->discovery.state);
+ err = -EFAULT;
+ }
+
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_confirm_name *cp = data;
+ struct inquiry_entry *e;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hci_discovery_active(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_FAILED);
+ goto failed;
+ }
+
+ e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
+ if (!e) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto failed;
+ }
+
+ if (cp->name_known) {
+ e->name_state = NAME_KNOWN;
+ list_del(&e->list);
+ } else {
+ e->name_state = NAME_NEEDED;
+ hci_inquiry_cache_update_resolve(hdev, e);
+ }
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
+ sizeof(cp->addr));
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_block_device *cp = data;
+ u8 status;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_lock(hdev);
+
+ err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
+ if (err < 0)
+ status = MGMT_STATUS_FAILED;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_unblock_device *cp = data;
+ u8 status;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_lock(hdev);
+
+ err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
+ if (err < 0)
+ status = MGMT_STATUS_INVALID_PARAMS;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_device_id *cp = data;
+ struct hci_request req;
+ int err;
+ __u16 source;
+
+ BT_DBG("%s", hdev->name);
+
+ source = __le16_to_cpu(cp->source);
+
+ if (source > 0x0002)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->devid_source = source;
+ hdev->devid_vendor = __le16_to_cpu(cp->vendor);
+ hdev->devid_product = __le16_to_cpu(cp->product);
+ hdev->devid_version = __le16_to_cpu(cp->version);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+
+ hci_req_init(&req, hdev);
+ update_eir(&req);
+ hci_req_run(&req, NULL);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ mgmt_status(status));
+ } else {
+ struct mgmt_mode *cp = cmd->param;
+
+ if (cp->val)
+ set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+ else
+ clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
+ new_settings(hdev, cmd->sk);
+ }
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (!hdev_is_powered(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_NOT_POWERED);
+
+ if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
+ hdev);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
+ data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ write_fast_connectable(&req, cp->val);
+
+ err = hci_req_run(&req, fast_connectable_complete);
+ if (err < 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
+ mgmt_pending_remove(cmd);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static bool ltk_is_valid(struct mgmt_ltk_info *key)
+{
+ if (key->authenticated != 0x00 && key->authenticated != 0x01)
+ return false;
+ if (key->master != 0x00 && key->master != 0x01)
+ return false;
+ if (!bdaddr_type_is_le(key->addr.type))
+ return false;
+ return true;
+}
+
+static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ void *cp_data, u16 len)
+{
+ struct mgmt_cp_load_long_term_keys *cp = cp_data;
+ u16 key_count, expected_len;
+ int i, err;
+
+ key_count = __le16_to_cpu(cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_ltk_info);
+ if (expected_len != len) {
+		BT_ERR("load_keys: expected %u bytes, got %u bytes",
+		       expected_len, len);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ BT_DBG("%s key_count %u", hdev->name, key_count);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+
+ if (!ltk_is_valid(key))
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ hci_smp_ltks_clear(hdev);
+
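+	/* The existing LTKs were cleared above; store each provided key as
+	 * either the master or the slave LTK depending on its flag.
+	 */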
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+ u8 type;
+
+ if (key->master)
+ type = HCI_SMP_LTK;
+ else
+ type = HCI_SMP_LTK_SLAVE;
+
+ hci_add_ltk(hdev, &key->addr.bdaddr,
+ bdaddr_to_le(key->addr.type),
+ type, 0, key->authenticated, key->val,
+ key->enc_size, key->ediv, key->rand);
+ }
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
+ NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
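+/* Command dispatch table: the array index corresponds to the mgmt opcode.
+ * data_len is the exact expected parameter length, or the minimum length
+ * for variable-length commands such as the key-loading operations.
+ */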
+static const struct mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ bool var_len;
+ size_t data_len;
+} mgmt_handlers[] = {
+ { NULL }, /* 0x0000 (no command) */
+ { read_version, false, MGMT_READ_VERSION_SIZE },
+ { read_commands, false, MGMT_READ_COMMANDS_SIZE },
+ { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
+ { read_controller_info, false, MGMT_READ_INFO_SIZE },
+ { set_powered, false, MGMT_SETTING_SIZE },
+ { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
+ { set_connectable, false, MGMT_SETTING_SIZE },
+ { set_fast_connectable, false, MGMT_SETTING_SIZE },
+ { set_pairable, false, MGMT_SETTING_SIZE },
+ { set_link_security, false, MGMT_SETTING_SIZE },
+ { set_ssp, false, MGMT_SETTING_SIZE },
+ { set_hs, false, MGMT_SETTING_SIZE },
+ { set_le, false, MGMT_SETTING_SIZE },
+ { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
+ { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
+ { add_uuid, false, MGMT_ADD_UUID_SIZE },
+ { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
+ { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
+ { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
+ { disconnect, false, MGMT_DISCONNECT_SIZE },
+ { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
+ { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
+ { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
+ { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
+ { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
+ { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
+ { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
+ { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
+ { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
+ { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
+ { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
+ { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
+ { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
+ { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
+ { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
+ { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
+ { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
+ { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
+ { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
+};
+
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
- unsigned char *buf;
+ void *buf;
+ u8 *cp;
struct mgmt_hdr *hdr;
u16 opcode, index, len;
+ struct hci_dev *hdev = NULL;
+ const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2611,154 +3188,64 @@
goto done;
}
- hdr = (struct mgmt_hdr *) buf;
- opcode = get_unaligned_le16(&hdr->opcode);
- index = get_unaligned_le16(&hdr->index);
- len = get_unaligned_le16(&hdr->len);
+ hdr = buf;
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
goto done;
}
- BT_DBG("got opcode %x", opcode);
- switch (opcode) {
- case MGMT_OP_READ_VERSION:
- err = read_version(sk);
- break;
- case MGMT_OP_READ_INDEX_LIST:
- err = read_index_list(sk);
- break;
- case MGMT_OP_READ_INFO:
- err = read_controller_info(sk, index);
- break;
- case MGMT_OP_SET_POWERED:
- err = set_powered(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_DISCOVERABLE:
- err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_LIMIT_DISCOVERABLE:
- err = set_limited_discoverable(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_SET_CONNECTABLE:
- err = set_connectable(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_PAIRABLE:
- err = set_pairable(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_ADD_UUID:
- err = add_uuid(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_REMOVE_UUID:
- err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_DEV_CLASS:
- err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_SERVICE_CACHE:
- err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_LOAD_KEYS:
- err = load_keys(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_REMOVE_KEY:
- err = remove_key(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_DISCONNECT:
- err = disconnect(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_GET_CONNECTIONS:
- err = get_connections(sk, index);
- break;
- case MGMT_OP_PIN_CODE_REPLY:
- err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_PIN_CODE_NEG_REPLY:
- err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_IO_CAPABILITY:
- err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_PAIR_DEVICE:
- err = pair_device(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_USER_CONFIRM_REPLY:
- case MGMT_OP_USER_PASSKEY_REPLY:
- case MGMT_OP_USER_CONFIRM_NEG_REPLY:
- err = user_confirm_reply(sk, index, buf + sizeof(*hdr),
- len, opcode);
- break;
- case MGMT_OP_SET_LOCAL_NAME:
- err = set_local_name(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_START_DISCOVERY:
- err = start_discovery(sk, index);
- break;
- case MGMT_OP_STOP_DISCOVERY:
- err = stop_discovery(sk, index);
- break;
- case MGMT_OP_RESOLVE_NAME:
- err = resolve_name(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_CANCEL_RESOLVE_NAME:
- err = cancel_resolve_name(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_CONNECTION_PARAMS:
- err = set_connection_params(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_SET_RSSI_REPORTER:
- err = set_rssi_reporter(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_UNSET_RSSI_REPORTER:
- err = unset_rssi_reporter(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_READ_LOCAL_OOB_DATA:
- err = read_local_oob_data(sk, index);
- break;
- case MGMT_OP_ADD_REMOTE_OOB_DATA:
- err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
- err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_ENCRYPT_LINK:
- err = encrypt_link(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_LE_ADD_DEV_WHITE_LIST:
- err = le_add_dev_white_list(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_LE_REMOVE_DEV_WHITE_LIST:
- err = le_remove_dev_white_list(sk, index, buf + sizeof(*hdr),
- len);
- break;
- case MGMT_OP_LE_CLEAR_WHITE_LIST:
- err = le_clear_white_list(sk, index);
- break;
- case MGMT_OP_LE_CREATE_CONN_WHITE_LIST:
- err = le_create_conn_white_list(sk, index);
- break;
- case MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST:
- err = le_cancel_create_conn_white_list(sk, index);
- break;
- case MGMT_OP_LE_CANCEL_CREATE_CONN:
- err = le_cancel_create_conn(sk, index, buf + sizeof(*hdr), len);
- break;
- default:
- BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, index, opcode, 0x01);
- break;
+ if (index != MGMT_INDEX_NONE) {
+ hdev = hci_dev_get(index);
+ if (!hdev) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
}
+ if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
+ mgmt_handlers[opcode].func == NULL) {
+ BT_DBG("Unknown op %u", opcode);
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_UNKNOWN_COMMAND);
+ goto done;
+ }
+
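+	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry a
+	 * controller index, while all other opcodes require one.
+	 */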
+ if ((hdev && opcode < MGMT_OP_READ_INFO) ||
+ (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+
+ handler = &mgmt_handlers[opcode];
+
+ if ((handler->var_len && len < handler->data_len) ||
+ (!handler->var_len && len != handler->data_len)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto done;
+ }
+
+ if (hdev)
+ mgmt_init_hdev(sk, hdev);
+
+ cp = buf + sizeof(*hdr);
+
+ err = handler->func(sk, hdev, cp, len);
if (err < 0)
goto done;
err = msglen;
done:
+ if (hdev)
+ hci_dev_put(hdev);
+
kfree(buf);
return err;
}
@@ -2771,37 +3258,37 @@
mgmt_pending_remove(cmd);
}
-int mgmt_index_added(u16 index)
+int mgmt_index_added(struct hci_dev *hdev)
{
- BT_DBG("%d", index);
- return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+ if (!mgmt_valid_hdev(hdev))
+ return -ENOTSUPP;
+
+ return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}
-int mgmt_index_removed(u16 index)
+int mgmt_index_removed(struct hci_dev *hdev)
{
- u8 status = ENODEV;
+ u8 status = MGMT_STATUS_INVALID_INDEX;
- BT_DBG("%d", index);
+ if (!mgmt_valid_hdev(hdev))
+ return -ENOTSUPP;
- mgmt_pending_foreach(0, index, cmd_status_rsp, &status);
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
- return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
struct cmd_lookup {
- u8 val;
struct sock *sk;
+ struct hci_dev *hdev;
+ u8 mgmt_status;
};
-static void mode_rsp(struct pending_cmd *cmd, void *data)
+static void settings_rsp(struct pending_cmd *cmd, void *data)
{
- struct mgmt_mode *cp = cmd->param;
struct cmd_lookup *match = data;
- if (cp->val != match->val)
- return;
-
- send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+ send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
list_del(&cmd->list);
@@ -2813,133 +3300,273 @@
mgmt_pending_free(cmd);
}
-int mgmt_powered(u16 index, u8 powered)
+static void set_bredr_scan(struct hci_request *req)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { powered, NULL };
- int ret;
+ struct hci_dev *hdev = req->hdev;
+ u8 scan = 0;
- BT_DBG("hci%u %d", index, powered);
+ /* Ensure that fast connectable is disabled. This function will
+ * not do anything if the page scan parameters are already what
+ * they should be.
+ */
+ write_fast_connectable(req, false);
- mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ scan |= SCAN_PAGE;
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ scan |= SCAN_INQUIRY;
- if (!powered) {
- u8 status = ENETDOWN;
- mgmt_pending_foreach(0, index, cmd_status_rsp, &status);
+ if (scan)
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+static void powered_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
+
+ new_settings(hdev, match.sk);
+
+ hci_dev_unlock(hdev);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+static int powered_update_hci(struct hci_dev *hdev)
+{
+ struct hci_request req;
+ u8 link_sec;
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ !lmp_host_ssp_capable(hdev)) {
+ u8 ssp = 1;
+
+ hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
}
- ev.val = powered;
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ lmp_bredr_capable(hdev)) {
+ struct hci_cp_write_le_host_supported cp;
- ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+ cp.le = 1;
+ cp.simul = lmp_le_br_capable(hdev);
+
+ /* Check first if we already have the right
+ * host state (host features set)
+ */
+ if (cp.le != lmp_host_le_capable(hdev) ||
+ cp.simul != lmp_host_le_br_capable(hdev))
+ hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ sizeof(cp), &cp);
+ }
+
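+	/* Mirror the link security (auth enable) setting into the
+	 * controller if it differs from the current HCI state.
+	 */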
+ link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
+ hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
+ sizeof(link_sec), &link_sec);
+
+ if (lmp_bredr_capable(hdev)) {
+ set_bredr_scan(&req);
+ update_class(&req);
+ update_name(&req);
+ update_eir(&req);
+ }
+
+ return hci_req_run(&req, powered_complete);
+}
+
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
+ u8 zero_cod[] = { 0, 0, 0 };
+ int err;
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return 0;
+
+ if (powered) {
+ if (powered_update_hci(hdev) == 0)
+ return 0;
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
+ &match);
+ goto new_settings;
+ }
+
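+	/* Powering off: respond to the pending power command, fail all other
+	 * pending commands, and signal a cleared class of device if needed.
+	 */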
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
+
+ if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
+ mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+ zero_cod, sizeof(zero_cod), NULL);
+
+new_settings:
+ err = new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
- return ret;
-}
-
-int mgmt_discoverable(u16 index, u8 discoverable)
-{
- struct mgmt_mode ev;
- struct cmd_lookup match = { discoverable, NULL };
- int ret;
-
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
-
- ev.val = discoverable;
-
- ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
- match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- return ret;
-}
-
-int mgmt_connectable(u16 index, u8 connectable)
-{
- struct mgmt_mode ev;
- struct cmd_lookup match = { connectable, NULL };
- int ret;
-
- mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
-
- ev.val = connectable;
-
- ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- return ret;
-}
-
-int mgmt_new_key(u16 index, struct link_key *key, u8 bonded)
-{
- struct mgmt_ev_new_key *ev;
- int err, total;
-
- total = sizeof(struct mgmt_ev_new_key) + key->dlen;
- ev = kzalloc(total, GFP_ATOMIC);
- if (!ev)
- return -ENOMEM;
-
- bacpy(&ev->key.bdaddr, &key->bdaddr);
- ev->key.addr_type = key->addr_type;
- ev->key.key_type = key->key_type;
- memcpy(ev->key.val, key->val, 16);
- ev->key.pin_len = key->pin_len;
- ev->key.auth = key->auth;
- ev->store_hint = bonded;
- ev->key.dlen = key->dlen;
-
- memcpy(ev->key.data, key->data, key->dlen);
-
- err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
-
- kfree(ev);
-
return err;
}
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le)
+int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
- struct mgmt_ev_connected ev;
struct pending_cmd *cmd;
- struct hci_dev *hdev;
+ u8 status;
- BT_DBG("hci%u", index);
+ cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ if (!cmd)
+ return -ENOENT;
- hdev = hci_dev_get(index);
+ if (err == -ERFKILL)
+ status = MGMT_STATUS_RFKILLED;
+ else
+ status = MGMT_STATUS_FAILED;
- if (!hdev)
- return -ENODEV;
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
- bacpy(&ev.bdaddr, bdaddr);
- ev.le = le;
+ mgmt_pending_remove(cmd);
- cmd = mgmt_pending_find(MGMT_OP_LE_CREATE_CONN_WHITE_LIST, index);
- if (cmd) {
- BT_ERR("mgmt_connected remove mgmt pending white_list");
- mgmt_pending_remove(cmd);
- }
-
- return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+ return err;
}
-int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval,
- u16 latency, u16 timeout)
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
- struct mgmt_ev_le_conn_params ev;
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
- bacpy(&ev.bdaddr, bdaddr);
- ev.interval = interval;
- ev.latency = latency;
- ev.timeout = timeout;
+ if (discoverable) {
+ if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ changed = true;
+ }
- return mgmt_event(MGMT_EV_LE_CONN_PARAMS, index, &ev, sizeof(ev),
- NULL);
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
+ &match);
+
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
+}
+
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+{
+ struct pending_cmd *cmd;
+ bool changed = false;
+ int err = 0;
+
+ if (connectable) {
+ if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+ }
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+
+ if (changed)
+ err = new_settings(hdev, cmd ? cmd->sk : NULL);
+
+ return err;
+}
+
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+{
+ u8 mgmt_err = mgmt_status(status);
+
+ if (scan & SCAN_PAGE)
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ if (scan & SCAN_INQUIRY)
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ return 0;
+}
+
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
+{
+ struct mgmt_ev_new_link_key ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.store_hint = persistent;
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = BDADDR_BREDR;
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+ ev.key.pin_len = key->pin_len;
+
+ return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
+{
+ struct mgmt_ev_new_long_term_key ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.store_hint = persistent;
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
+ ev.key.authenticated = key->authenticated;
+ ev.key.enc_size = key->enc_size;
+ ev.key.ediv = key->ediv;
+
+ if (key->type == HCI_SMP_LTK)
+ ev.key.master = 1;
+
+ memcpy(ev.key.rand, key->rand, sizeof(key->rand));
+ memcpy(ev.key.val, key->val, sizeof(key->val));
+
+ return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
+ NULL);
+}
+
+int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class)
+{
+ char buf[512];
+ struct mgmt_ev_device_connected *ev = (void *) buf;
+ u16 eir_len = 0;
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
+
+ ev->flags = __cpu_to_le32(flags);
+
+ if (name_len > 0)
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
+ name, name_len);
+
+ if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
+ eir_len = eir_append_data(ev->eir, eir_len,
+ EIR_CLASS_OF_DEV, dev_class, 3);
+
+ ev->eir_len = cpu_to_le16(eir_len);
+
+ return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
+ sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2948,9 +3575,11 @@
struct sock **sk = data;
struct mgmt_rp_disconnect rp;
- bacpy(&rp.bdaddr, &cp->bdaddr);
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
+ sizeof(rp));
*sk = cmd->sk;
sock_hold(*sk);
@@ -2958,280 +3587,429 @@
mgmt_pending_remove(cmd);
}
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr, u8 reason)
+static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
- struct mgmt_ev_disconnected ev;
+ struct hci_dev *hdev = data;
+ struct mgmt_cp_unpair_device *cp = cmd->param;
+ struct mgmt_rp_unpair_device rp;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
+
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason)
+{
+ struct mgmt_ev_device_disconnected ev;
struct sock *sk = NULL;
int err;
- bacpy(&ev.bdaddr, bdaddr);
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.reason = reason;
- err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
+ sk);
if (sk)
sock_put(sk);
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+ mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+ hdev);
return err;
}
-int mgmt_disconnect_failed(u16 index)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
+ struct mgmt_rp_disconnect rp;
struct pending_cmd *cmd;
int err;
- cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+ mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+ hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
return -ENOENT;
- err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
+
+ err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_connect_failed ev;
- bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
struct mgmt_ev_pin_code_request ev;
- BT_DBG("hci%u", index);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = BDADDR_BREDR;
+ ev.secure = secure;
- bacpy(&ev.bdaddr, bdaddr);
- ev.secure = 0;
-
- return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
- NULL);
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
}
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
return -ENOENT;
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = BDADDR_BREDR;
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
- sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
return -ENOENT;
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = BDADDR_BREDR;
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
- sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_request(u16 index, u8 event,
- bdaddr_t *bdaddr, __le32 value)
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, __le32 value,
+ u8 confirm_hint)
{
struct mgmt_ev_user_confirm_request ev;
- struct hci_conn *conn = NULL;
- struct hci_dev *hdev;
- u8 loc_cap, rem_cap, loc_mitm, rem_mitm;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
- hdev = hci_dev_get(index);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.confirm_hint = confirm_hint;
+ ev.value = value;
- if (!hdev)
- return -ENODEV;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
-
- ev.auto_confirm = 0;
-
- if (!conn || event != HCI_EV_USER_CONFIRM_REQUEST)
- goto no_auto_confirm;
-
- loc_cap = (conn->io_capability == 0x04) ? 0x01 : conn->io_capability;
- rem_cap = conn->remote_cap;
- loc_mitm = conn->auth_type & 0x01;
- rem_mitm = conn->remote_auth & 0x01;
-
- if ((conn->auth_type & HCI_AT_DEDICATED_BONDING) &&
- conn->auth_initiator && rem_cap == 0x03)
- ev.auto_confirm = 1;
- else if (loc_cap == 0x01 && (rem_cap == 0x00 || rem_cap == 0x03)) {
- if (!loc_mitm && !rem_mitm)
- value = 0;
- goto no_auto_confirm;
- }
-
- /* Show bonding dialog if neither side requires no bonding */
- if ((conn->auth_type > 0x01) && (conn->remote_auth > 0x01)) {
- if (!loc_mitm && !rem_mitm)
- value = 0;
- goto no_auto_confirm;
- }
-
- if ((!loc_mitm || rem_cap == 0x03) && (!rem_mitm || loc_cap == 0x03))
- ev.auto_confirm = 1;
-
-no_auto_confirm:
- bacpy(&ev.bdaddr, bdaddr);
- ev.event = event;
- put_unaligned_le32(value, &ev.value);
-
- hci_dev_put(hdev);
-
- return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
- NULL);
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
}
-int mgmt_user_passkey_request(u16 index, bdaddr_t *bdaddr)
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type)
{
struct mgmt_ev_user_passkey_request ev;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
- return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, index, &ev, sizeof(ev),
- NULL);
+ return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
}
-static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
- u8 opcode)
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status,
+ u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
int err;
- cmd = mgmt_pending_find(opcode, index);
+ cmd = mgmt_pending_find(opcode, hdev);
if (!cmd)
return -ENOENT;
- bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
- err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
+ err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
+ &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
- MGMT_OP_USER_CONFIRM_REPLY);
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_CONFIRM_REPLY);
}
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
- MGMT_OP_USER_CONFIRM_NEG_REPLY);
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 passkey,
+ u8 entered)
+{
+ struct mgmt_ev_passkey_notify ev;
+
+ BT_DBG("%s", hdev->name);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.passkey = __cpu_to_le32(passkey);
+ ev.entered = entered;
+
+ return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_auth_failed ev;
- bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
- struct pending_cmd *cmd;
- struct hci_dev *hdev;
- struct mgmt_cp_set_local_name ev;
- int err;
-
- memset(&ev, 0, sizeof(ev));
- memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
-
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
- if (!cmd)
- goto send_event;
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
- goto failed;
+ u8 mgmt_err = mgmt_status(status);
+ mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
+ cmd_status_rsp, &mgmt_err);
+ return 0;
}
- hdev = hci_dev_get(index);
- if (hdev) {
- update_eir(hdev);
- hci_dev_put(hdev);
+ if (test_bit(HCI_AUTH, &hdev->flags)) {
+ if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ changed = true;
}
- err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
- sizeof(ev));
- if (err < 0)
- goto failed;
+ mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
+ &match);
-send_event:
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ if (changed)
+ err = new_settings(hdev, match.sk);
-failed:
- if (cmd)
- mgmt_pending_remove(cmd);
+ if (match.sk)
+ sock_put(match.sk);
+
return err;
}
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
- u8 status)
+static void clear_eir(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_eir cp;
+
+ if (!lmp_ext_inq_capable(hdev))
+ return;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+
+ memset(&cp, 0, sizeof(cp));
+
+ hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
+int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ struct hci_request req;
+ bool changed = false;
+ int err = 0;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags))
+ err = new_settings(hdev, NULL);
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
+ &mgmt_err);
+
+ return err;
+ }
+
+ if (enable) {
+ if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ changed = true;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
+
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ update_eir(&req);
+ else
+ clear_eir(&req);
+
+ hci_req_run(&req, NULL);
+
+ return err;
+}
+
+static void sk_lookup(struct pending_cmd *cmd, void *data)
+{
+ struct cmd_lookup *match = data;
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
+ }
+}
+
+int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
+ int err = 0;
+
+ mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
+ mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
+ mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
+
+ if (!status)
+ err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
+ 3, NULL);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
+}
+
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+{
+ struct mgmt_cp_set_local_name ev;
+ struct pending_cmd *cmd;
+
+ if (status)
+ return 0;
+
+ memset(&ev, 0, sizeof(ev));
+ memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
+ memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ if (!cmd) {
+ memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
+
+		/* If this is an HCI command related to powering on the
+		 * HCI dev, don't send any mgmt signals.
+		 */
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+ return 0;
+ }
+
+ return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
+
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status)
{
struct pending_cmd *cmd;
int err;
- BT_DBG("hci%u status %u", index, status);
+ BT_DBG("%s status %u", hdev->name, status);
- cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+ cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
return -ENOENT;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EIO);
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ mgmt_status(status));
} else {
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash, sizeof(rp.hash));
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
- err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
+ sizeof(rp));
}
mgmt_pending_remove(cmd);
@@ -3239,182 +4017,194 @@
return err;
}
-void mgmt_read_rssi_complete(u16 index, s8 rssi, bdaddr_t *bdaddr,
- u16 handle, u8 status)
+int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
- struct mgmt_ev_rssi_update ev;
- struct hci_conn *conn;
- struct hci_dev *hdev;
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+ int err = 0;
- if (status)
- return;
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
- hdev = hci_dev_get(index);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (enable && test_and_clear_bit(HCI_LE_ENABLED,
+ &hdev->dev_flags))
+ err = new_settings(hdev, NULL);
- if (!conn)
- return;
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
- BT_DBG("rssi_update_thresh_exceed : %d ",
- conn->rssi_update_thresh_exceed);
- BT_DBG("RSSI Threshold : %d , recvd RSSI : %d ",
- conn->rssi_threshold, rssi);
-
- if (conn->rssi_update_thresh_exceed == 1) {
- BT_DBG("rssi_update_thresh_exceed == 1");
- if (rssi > conn->rssi_threshold) {
- memset(&ev, 0, sizeof(ev));
- bacpy(&ev.bdaddr, bdaddr);
- ev.rssi = rssi;
- mgmt_event(MGMT_EV_RSSI_UPDATE, index, &ev,
- sizeof(ev), NULL);
- } else {
- hci_conn_set_rssi_reporter(conn, conn->rssi_threshold,
- conn->rssi_update_interval,
- conn->rssi_update_thresh_exceed);
- }
- } else {
- BT_DBG("rssi_update_thresh_exceed == 0");
- if (rssi < conn->rssi_threshold) {
- memset(&ev, 0, sizeof(ev));
- bacpy(&ev.bdaddr, bdaddr);
- ev.rssi = rssi;
- mgmt_event(MGMT_EV_RSSI_UPDATE, index, &ev,
- sizeof(ev), NULL);
- } else {
- hci_conn_set_rssi_reporter(conn, conn->rssi_threshold,
- conn->rssi_update_interval,
- conn->rssi_update_thresh_exceed);
- }
+ return err;
}
+
+ if (enable) {
+ if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ changed = true;
+ } else {
+ if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ changed = true;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
+
+ if (changed)
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
}
-
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 type, u8 le,
- u8 *dev_class, s8 rssi, u8 eir_len, u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8 ssp,
+		      u8 *eir, u16 eir_len)
{
- struct mgmt_ev_device_found ev;
- struct hci_dev *hdev;
+ char buf[512];
+ struct mgmt_ev_device_found *ev = (void *) buf;
+ size_t ev_size;
+
+ /* Leave 5 bytes for a potential CoD field */
+ if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
+ ev->rssi = rssi;
+ if (cfm_name)
+ ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
+ if (!ssp)
+ ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
+
+ if (eir_len > 0)
+ memcpy(ev->eir, eir, eir_len);
+
+ if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
+ eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
+ dev_class, 3);
+
+ ev->eir_len = cpu_to_le16(eir_len);
+ ev_size = sizeof(*ev) + eir_len;
+
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+}
+
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len)
+{
+ struct mgmt_ev_device_found *ev;
+ char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
+ u16 eir_len;
+
+ ev = (struct mgmt_ev_device_found *) buf;
+
+ memset(buf, 0, sizeof(buf));
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
+ ev->rssi = rssi;
+
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
+ name_len);
+
+ ev->eir_len = cpu_to_le16(eir_len);
+
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
+ sizeof(*ev) + eir_len, NULL);
+}
+
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ u8 type;
int err;
- BT_DBG("le: %d", le);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- memset(&ev, 0, sizeof(ev));
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
- bacpy(&ev.bdaddr, bdaddr);
- ev.rssi = rssi;
- ev.type = type;
- ev.le = le;
+ type = hdev->discovery.type;
- if (dev_class)
- memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
+ err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+ &type, sizeof(type));
+ mgmt_pending_remove(cmd);
- if (eir && eir_len)
- memcpy(ev.eir, eir, eir_len);
+ return err;
+}
- err = mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
- if (err < 0)
- return err;
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
- hdev = hci_dev_get(index);
+ err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+ &hdev->discovery.type, sizeof(hdev->discovery.type));
+ mgmt_pending_remove(cmd);
- if (!hdev)
- return 0;
+ return err;
+}
- if (hdev->disco_state == SCAN_IDLE)
- goto done;
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+{
+ struct mgmt_ev_discovering ev;
+ struct pending_cmd *cmd;
- hdev->disco_int_count++;
+ BT_DBG("%s discovering %u", hdev->name, discovering);
- if (hdev->disco_int_count >= hdev->disco_int_phase) {
- /* Inquiry scan for General Discovery LAP */
- struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 4, 0};
- struct hci_cp_le_set_scan_enable le_cp = {0, 0};
+ if (discovering)
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ else
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
- hdev->disco_int_phase *= 2;
- hdev->disco_int_count = 0;
- if (hdev->disco_state == SCAN_LE) {
- /* cancel LE scan */
- hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
- sizeof(le_cp), &le_cp);
- /* start BR scan */
- cp.num_rsp = (u8) hdev->disco_int_phase;
- hci_send_cmd(hdev, HCI_OP_INQUIRY,
- sizeof(cp), &cp);
- hdev->disco_state = SCAN_BR;
- del_timer_sync(&hdev->disco_le_timer);
- }
+ if (cmd != NULL) {
+ u8 type = hdev->discovery.type;
+
+ cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
+ sizeof(type));
+ mgmt_pending_remove(cmd);
}
-done:
- hci_dev_put(hdev);
- return 0;
-}
-
-
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 status, u8 *name)
-{
- struct mgmt_ev_remote_name ev;
-
memset(&ev, 0, sizeof(ev));
+ ev.type = hdev->discovery.type;
+ ev.discovering = discovering;
- bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
- memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
-
- return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_encrypt_change(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
- struct mgmt_ev_encrypt_change ev;
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_blocked ev;
- BT_DBG("hci%u", index);
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
- bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = type;
- return mgmt_event(MGMT_EV_ENCRYPT_CHANGE, index, &ev, sizeof(ev),
- NULL);
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
-int mgmt_remote_class(u16 index, bdaddr_t *bdaddr, u8 dev_class[3])
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
- struct mgmt_ev_remote_class ev;
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_unblocked ev;
- memset(&ev, 0, sizeof(ev));
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
- bacpy(&ev.bdaddr, bdaddr);
- memcpy(ev.dev_class, dev_class, 3);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = type;
- return mgmt_event(MGMT_EV_REMOTE_CLASS, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
-int mgmt_remote_version(u16 index, bdaddr_t *bdaddr, u8 ver, u16 mnf,
- u16 sub_ver)
-{
- struct mgmt_ev_remote_version ev;
-
- memset(&ev, 0, sizeof(ev));
-
- bacpy(&ev.bdaddr, bdaddr);
- ev.lmp_ver = ver;
- ev.manufacturer = mnf;
- ev.lmp_subver = sub_ver;
-
- return mgmt_event(MGMT_EV_REMOTE_VERSION, index, &ev, sizeof(ev), NULL);
-}
-
-int mgmt_remote_features(u16 index, bdaddr_t *bdaddr, u8 features[8])
-{
- struct mgmt_ev_remote_features ev;
-
- memset(&ev, 0, sizeof(ev));
-
- bacpy(&ev.bdaddr, bdaddr);
- memcpy(ev.features, features, sizeof(ev.features));
-
- return mgmt_event(MGMT_EV_REMOTE_FEATURES, index, &ev, sizeof(ev),
- NULL);
-}
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 195271a..0c77476 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1,6 +1,5 @@
/*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
- Copyright (c) 2013 The Linux Foundation. All rights reserved.
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
@@ -27,23 +26,8 @@
*/
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <net/sock.h>
-#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -52,9 +36,6 @@
#include <net/bluetooth/rfcomm.h>
#define VERSION "1.11"
-/* 1 Byte DLCI, 1 Byte Control filed, 2 Bytes Length, 1 Byte for Credits,
- * 1 Byte FCS */
-#define RFCOMM_HDR_SIZE 6
static bool disable_cfc;
static bool l2cap_ertm;
@@ -67,7 +48,6 @@
#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
-static unsigned long rfcomm_event;
static LIST_HEAD(session_list);
@@ -87,11 +67,9 @@
static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
bdaddr_t *dst,
u8 sec_level,
- int *err,
- u8 channel,
- struct rfcomm_dlc *d);
+ int *err);
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
-static void rfcomm_session_del(struct rfcomm_session *s);
+static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
/* ---- RFCOMM frame parsing macros ---- */
#define __get_dlci(b) ((b & 0xfc) >> 2)
@@ -123,41 +101,13 @@
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
-struct rfcomm_sock_release_work {
- struct work_struct work;
- struct socket *sock;
- int state;
-};
-
-static inline void rfcomm_schedule(void)
+static void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
- set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
wake_up_process(rfcomm_thread);
}
-static inline void rfcomm_session_put(struct rfcomm_session *s)
-{
- bool match = false;
- struct rfcomm_session *sess;
- struct list_head *p, *n;
- list_for_each_safe(p, n, &session_list) {
- sess = list_entry(p, struct rfcomm_session, list);
- if (s == sess) {
- match = true;
- break;
- }
- }
- if (!match) {
- BT_ERR("session already freed previously");
- dump_stack();
- return;
- }
- if (atomic_dec_and_test(&s->refcnt))
- rfcomm_session_del(s);
-}
-
/* ---- RFCOMM FCS computation ---- */
/* reversed, 8-bit, poly=0x07 */
@@ -257,13 +207,14 @@
return err;
}
-static inline int rfcomm_check_security(struct rfcomm_dlc *d)
+static int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
+ struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
+
__u8 auth_type;
switch (d->sec_level) {
- case BT_SECURITY_VERY_HIGH:
case BT_SECURITY_HIGH:
auth_type = HCI_AT_GENERAL_BONDING_MITM;
break;
@@ -275,8 +226,7 @@
break;
}
- return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
- auth_type);
+ return hci_conn_security(conn->hcon, d->sec_level, auth_type);
}
static void rfcomm_session_timeout(unsigned long arg)
@@ -293,16 +243,14 @@
{
BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
- if (!mod_timer(&s->timer, jiffies + timeout))
- rfcomm_session_hold(s);
+ mod_timer(&s->timer, jiffies + timeout);
}
static void rfcomm_session_clear_timer(struct rfcomm_session *s)
{
BT_DBG("session %p state %ld", s, s->state);
- if (timer_pending(&s->timer) && del_timer(&s->timer))
- rfcomm_session_put(s);
+ del_timer_sync(&s->timer);
}
/* ---- RFCOMM DLCs ---- */
@@ -329,7 +277,7 @@
{
BT_DBG("dlc %p state %ld", d, d->state);
- if (timer_pending(&d->timer) && del_timer(&d->timer))
+ if (del_timer(&d->timer))
rfcomm_dlc_put(d);
}
@@ -380,8 +328,6 @@
{
BT_DBG("dlc %p session %p", d, s);
- rfcomm_session_hold(s);
-
rfcomm_session_clear_timer(s);
rfcomm_dlc_hold(d);
list_add(&d->list, &s->dlcs);
@@ -400,20 +346,16 @@
if (list_empty(&s->dlcs))
rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT);
-
- rfcomm_session_put(s);
}
static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_dlc *d;
- struct list_head *p;
- list_for_each(p, &s->dlcs) {
- d = list_entry(p, struct rfcomm_dlc, list);
+ list_for_each_entry(d, &s->dlcs, list)
if (d->dlci == dlci)
return d;
- }
+
return NULL;
}
@@ -423,8 +365,8 @@
int err = 0;
u8 dlci;
- BT_DBG("dlc %p state %ld %s %s channel %d",
- d, d->state, batostr(src), batostr(dst), channel);
+ BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",
+ d, d->state, src, dst, channel);
if (channel < 1 || channel > 30)
return -EINVAL;
@@ -434,31 +376,31 @@
s = rfcomm_session_get(src, dst);
if (!s) {
- s = rfcomm_session_create(src, dst,
- d->sec_level, &err, channel, d);
+ s = rfcomm_session_create(src, dst, d->sec_level, &err);
if (!s)
return err;
- } else {
- dlci = __dlci(!s->initiator, channel);
-
- /* Check if DLCI already exists */
- if (rfcomm_dlc_get(s, dlci))
- return -EBUSY;
-
- rfcomm_dlc_clear_state(d);
-
- d->dlci = dlci;
- d->addr = __addr(s->initiator, dlci);
- d->priority = 7;
-
- d->state = BT_CONFIG;
- rfcomm_dlc_link(s, d);
-
- d->out = 1;
-
- d->mtu = s->mtu;
- d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
}
+
+ dlci = __dlci(!s->initiator, channel);
+
+ /* Check if DLCI already exists */
+ if (rfcomm_dlc_get(s, dlci))
+ return -EBUSY;
+
+ rfcomm_dlc_clear_state(d);
+
+ d->dlci = dlci;
+ d->addr = __addr(s->initiator, dlci);
+ d->priority = 7;
+
+ d->state = BT_CONFIG;
+ rfcomm_dlc_link(s, d);
+
+ d->out = 1;
+
+ d->mtu = s->mtu;
+ d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
+
if (s->state == BT_CONNECTED) {
if (rfcomm_check_security(d))
rfcomm_send_pn(s, 1, d);
@@ -538,12 +480,34 @@
int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
{
- int r;
+ int r = 0;
+ struct rfcomm_dlc *d_list;
+ struct rfcomm_session *s, *s_list;
+
+ BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err);
rfcomm_lock();
- r = __rfcomm_dlc_close(d, err);
+ s = d->session;
+ if (!s)
+ goto no_session;
+ /* after waiting on the mutex check the session still exists
+ * then check the dlc still exists
+ */
+ list_for_each_entry(s_list, &session_list, list) {
+ if (s_list == s) {
+ list_for_each_entry(d_list, &s->dlcs, list) {
+ if (d_list == d) {
+ r = __rfcomm_dlc_close(d, err);
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+no_session:
rfcomm_unlock();
return r;
}
@@ -654,47 +618,22 @@
return s;
}
-static void rfcomm_sock_release_worker(struct work_struct *work)
-{
- struct rfcomm_sock_release_work *release_work =
- container_of(work, struct rfcomm_sock_release_work, work);
-
- BT_DBG("sock %p", release_work->sock);
-
- sock_release(release_work->sock);
- if (release_work->state != BT_LISTEN)
- module_put(THIS_MODULE);
-
- kfree(release_work);
-}
-
-static void rfcomm_session_del(struct rfcomm_session *s)
+static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s)
{
int state = s->state;
- struct socket *sock = s->sock;
- struct rfcomm_sock_release_work *release_work;
BT_DBG("session %p state %ld", s, s->state);
list_del(&s->list);
- if (state == BT_CONNECTED)
- rfcomm_send_disc(s, 0);
-
rfcomm_session_clear_timer(s);
-
+ sock_release(s->sock);
kfree(s);
- release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC);
- if (release_work) {
- INIT_WORK(&release_work->work, rfcomm_sock_release_worker);
- release_work->sock = sock;
- release_work->state = state;
+ if (state != BT_LISTEN)
+ module_put(THIS_MODULE);
- if (!schedule_work(&release_work->work))
- kfree(release_work);
- }
-
+ return NULL;
}
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
@@ -713,17 +652,16 @@
return NULL;
}
-static void rfcomm_session_close(struct rfcomm_session *s, int err)
+static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s,
+ int err)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
- BT_DBG("session %p state %ld err %d", s, s->state, err);
-
- rfcomm_session_hold(s);
-
s->state = BT_CLOSED;
+ BT_DBG("session %p state %ld err %d", s, s->state, err);
+
/* Close all dlcs */
list_for_each_safe(p, n, &s->dlcs) {
d = list_entry(p, struct rfcomm_dlc, list);
@@ -732,23 +670,20 @@
}
rfcomm_session_clear_timer(s);
- rfcomm_session_put(s);
+ return rfcomm_session_del(s);
}
static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
bdaddr_t *dst,
u8 sec_level,
- int *err,
- u8 channel,
- struct rfcomm_dlc *d)
+ int *err)
{
struct rfcomm_session *s = NULL;
struct sockaddr_l2 addr;
struct socket *sock;
struct sock *sk;
- u8 dlci;
- BT_DBG("%s %s", batostr(src), batostr(dst));
+ BT_DBG("%pMR -> %pMR", src, dst);
*err = rfcomm_l2sock_create(&sock);
if (*err < 0)
@@ -765,10 +700,10 @@
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->imtu = l2cap_mtu;
- l2cap_pi(sk)->sec_level = sec_level;
+ l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+ l2cap_pi(sk)->chan->sec_level = sec_level;
if (l2cap_ertm)
- l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
+ l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
release_sock(sk);
s = rfcomm_session_add(sock, BT_BOUND);
@@ -781,33 +716,13 @@
bacpy(&addr.l2_bdaddr, dst);
addr.l2_family = AF_BLUETOOTH;
- addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
+ addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM);
addr.l2_cid = 0;
- dlci = __dlci(!s->initiator, channel);
-
- /* Check if DLCI already exists */
- if (rfcomm_dlc_get(s, dlci))
- return NULL;
-
- rfcomm_dlc_clear_state(d);
-
- d->dlci = dlci;
- d->addr = __addr(s->initiator, dlci);
- d->priority = 7;
-
- d->state = BT_CONFIG;
- rfcomm_dlc_link(s, d);
-
- d->out = 1;
-
- d->mtu = s->mtu;
- d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
if (*err == 0 || *err == -EINPROGRESS)
return s;
- BT_ERR("error ret is %d, going to delete session", *err);
- rfcomm_dlc_unlink(d);
- return NULL;
+
+ return rfcomm_session_del(s);
failed:
sock_release(sock);
@@ -826,7 +741,6 @@
/* ---- RFCOMM frame sending ---- */
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
- struct socket *sock = s->sock;
struct kvec iv = { data, len };
struct msghdr msg;
@@ -834,7 +748,14 @@
memset(&msg, 0, sizeof(msg));
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return kernel_sendmsg(s->sock, &msg, &iv, 1, len);
+}
+
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+ BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+ return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd));
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
@@ -848,7 +769,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -862,7 +783,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -876,7 +797,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -912,7 +833,7 @@
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -1190,7 +1111,7 @@
}
/* ---- RFCOMM frame reception ---- */
-static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
+static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
{
BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
@@ -1199,7 +1120,7 @@
struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci);
if (!d) {
rfcomm_send_dm(s, dlci);
- return 0;
+ return s;
}
switch (d->state) {
@@ -1235,19 +1156,14 @@
break;
case BT_DISCONN:
- /* When socket is closed and we are not RFCOMM
- * initiator rfcomm_process_rx already calls
- * rfcomm_session_put() */
- if (s->sock->sk->sk_state != BT_CLOSED)
- if (list_empty(&s->dlcs))
- rfcomm_session_put(s);
+ s = rfcomm_session_close(s, ECONNRESET);
break;
}
}
- return 0;
+ return s;
}
-static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
+static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
{
int err = 0;
@@ -1271,13 +1187,13 @@
else
err = ECONNRESET;
- s->state = BT_CLOSED;
- rfcomm_session_close(s, err);
+ s = rfcomm_session_close(s, err);
}
- return 0;
+ return s;
}
-static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
+static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s,
+ u8 dlci)
{
int err = 0;
@@ -1306,16 +1222,15 @@
else
err = ECONNRESET;
- s->state = BT_CLOSED;
- rfcomm_session_close(s, err);
+ s = rfcomm_session_close(s, err);
}
-
- return 0;
+ return s;
}
void rfcomm_dlc_accept(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
+ struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
BT_DBG("dlc %p", d);
@@ -1329,7 +1244,7 @@
rfcomm_dlc_unlock(d);
if (d->role_switch)
- hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
+ hci_conn_switch_role(conn->hcon, 0x00);
rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
}
@@ -1730,11 +1645,18 @@
return 0;
}
-static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
+static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s,
+ struct sk_buff *skb)
{
struct rfcomm_hdr *hdr = (void *) skb->data;
u8 type, dlci, fcs;
+ if (!s) {
+ /* no session, so free socket data */
+ kfree_skb(skb);
+ return s;
+ }
+
dlci = __get_dlci(hdr->addr);
type = __get_type(hdr->ctrl);
@@ -1745,7 +1667,7 @@
if (__check_fcs(skb->data, type, fcs)) {
BT_ERR("bad checksum in packet");
kfree_skb(skb);
- return -EILSEQ;
+ return s;
}
if (__test_ea(hdr->len))
@@ -1761,22 +1683,23 @@
case RFCOMM_DISC:
if (__test_pf(hdr->ctrl))
- rfcomm_recv_disc(s, dlci);
+ s = rfcomm_recv_disc(s, dlci);
break;
case RFCOMM_UA:
if (__test_pf(hdr->ctrl))
- rfcomm_recv_ua(s, dlci);
+ s = rfcomm_recv_ua(s, dlci);
break;
case RFCOMM_DM:
- rfcomm_recv_dm(s, dlci);
+ s = rfcomm_recv_dm(s, dlci);
break;
case RFCOMM_UIH:
- if (dlci)
- return rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb);
-
+ if (dlci) {
+ rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb);
+ return s;
+ }
rfcomm_recv_mcc(s, skb);
break;
@@ -1785,7 +1708,7 @@
break;
}
kfree_skb(skb);
- return 0;
+ return s;
}
/* ---- Connection and data processing ---- */
@@ -1814,7 +1737,7 @@
/* Send data queued for the DLC.
* Return number of frames left in the queue.
*/
-static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
+static int rfcomm_process_tx(struct rfcomm_dlc *d)
{
struct sk_buff *skb;
int err;
@@ -1862,7 +1785,7 @@
return skb_queue_len(&d->tx_queue);
}
-static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+static void rfcomm_process_dlcs(struct rfcomm_session *s)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
@@ -1877,6 +1800,11 @@
continue;
}
+ if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
+ __rfcomm_dlc_close(d, ECONNREFUSED);
+ continue;
+ }
+
if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (d->out) {
@@ -1917,7 +1845,7 @@
}
}
-static inline void rfcomm_process_rx(struct rfcomm_session *s)
+static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
{
struct socket *sock = s->sock;
struct sock *sk = sock->sk;
@@ -1929,20 +1857,18 @@
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- rfcomm_recv_frame(s, skb);
+ s = rfcomm_recv_frame(s, skb);
else
kfree_skb(skb);
}
- if (sk->sk_state == BT_CLOSED) {
- if (!s->initiator)
- rfcomm_session_put(s);
+ if (s && (sk->sk_state == BT_CLOSED))
+ s = rfcomm_session_close(s, sk->sk_err);
- rfcomm_session_close(s, sk->sk_err);
- }
+ return s;
}
-static inline void rfcomm_accept_connection(struct rfcomm_session *s)
+static void rfcomm_accept_connection(struct rfcomm_session *s)
{
struct socket *sock = s->sock, *nsock;
int err;
@@ -1964,20 +1890,17 @@
s = rfcomm_session_add(nsock, BT_OPEN);
if (s) {
- rfcomm_session_hold(s);
-
/* We should adjust MTU on incoming sessions.
- * L2CAP MTU minus UIH header and FCS.
- * Need to accomodate 1 Byte credits information */
- s->mtu = min(l2cap_pi(nsock->sk)->omtu,
- l2cap_pi(nsock->sk)->imtu) - RFCOMM_HDR_SIZE;
+ * L2CAP MTU minus UIH header and FCS. */
+ s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
+ l2cap_pi(nsock->sk)->chan->imtu) - 5;
rfcomm_schedule();
} else
sock_release(nsock);
}
-static inline void rfcomm_check_connection(struct rfcomm_session *s)
+static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s)
{
struct sock *sk = s->sock->sk;
@@ -1988,21 +1911,20 @@
s->state = BT_CONNECT;
/* We can adjust MTU on outgoing sessions.
- * L2CAP MTU minus UIH header, Credits and FCS. */
- s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) -
- RFCOMM_HDR_SIZE;
+ * L2CAP MTU minus UIH header and FCS. */
+ s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
rfcomm_send_sabm(s, 0);
break;
case BT_CLOSED:
- s->state = BT_CLOSED;
- rfcomm_session_close(s, sk->sk_err);
+ s = rfcomm_session_close(s, sk->sk_err);
break;
}
+ return s;
}
-static inline void rfcomm_process_sessions(void)
+static void rfcomm_process_sessions(void)
{
struct list_head *p, *n;
@@ -2015,7 +1937,6 @@
if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
- rfcomm_session_put(s);
continue;
}
@@ -2024,21 +1945,18 @@
continue;
}
- rfcomm_session_hold(s);
-
switch (s->state) {
case BT_BOUND:
- rfcomm_check_connection(s);
+ s = rfcomm_check_connection(s);
break;
default:
- rfcomm_process_rx(s);
+ s = rfcomm_process_rx(s);
break;
}
- rfcomm_process_dlcs(s);
-
- rfcomm_session_put(s);
+ if (s)
+ rfcomm_process_dlcs(s);
}
rfcomm_unlock();
@@ -2062,7 +1980,7 @@
/* Bind socket */
bacpy(&addr.l2_bdaddr, ba);
addr.l2_family = AF_BLUETOOTH;
- addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
+ addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM);
addr.l2_cid = 0;
err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
if (err < 0) {
@@ -2073,7 +1991,7 @@
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->imtu = l2cap_mtu;
+ l2cap_pi(sk)->chan->imtu = l2cap_mtu;
release_sock(sk);
/* Start listening on the socket */
@@ -2085,10 +2003,11 @@
/* Add listening session */
s = rfcomm_session_add(sock, BT_LISTEN);
- if (!s)
+ if (!s) {
+ err = -ENOMEM;
goto failed;
+ }
- rfcomm_session_hold(s);
return 0;
failed:
sock_release(sock);
@@ -2116,19 +2035,18 @@
rfcomm_add_listener(BDADDR_ANY);
- while (!kthread_should_stop()) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
- /* No pending events. Let's sleep.
- * Incoming connections and data will wake us up. */
- schedule();
- }
- set_current_state(TASK_RUNNING);
+
+ if (kthread_should_stop())
+ break;
/* Process stuff */
- clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
rfcomm_process_sessions();
+
+ schedule();
}
+ __set_current_state(TASK_RUNNING);
rfcomm_kill_listener();
@@ -2147,15 +2065,13 @@
if (!s)
return;
- rfcomm_session_hold(s);
-
list_for_each_safe(p, n, &s->dlcs) {
d = list_entry(p, struct rfcomm_dlc, list);
if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (status || encrypt == 0x00) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
+ set_bit(RFCOMM_ENC_DROP, &d->flags);
continue;
}
}
@@ -2165,9 +2081,8 @@
set_bit(RFCOMM_SEC_PENDING, &d->flags);
rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
continue;
- } else if (d->sec_level == BT_SECURITY_HIGH ||
- d->sec_level == BT_SECURITY_VERY_HIGH) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
+ } else if (d->sec_level == BT_SECURITY_HIGH) {
+ set_bit(RFCOMM_ENC_DROP, &d->flags);
continue;
}
}
@@ -2175,14 +2090,12 @@
if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
continue;
- if (!status)
+ if (!status && hci_conn_check_secure(conn, d->sec_level))
set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
else
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
}
- rfcomm_session_put(s);
-
rfcomm_schedule();
}
@@ -2194,21 +2107,18 @@
static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
{
struct rfcomm_session *s;
- struct list_head *pp, *p;
rfcomm_lock();
- list_for_each(p, &session_list) {
- s = list_entry(p, struct rfcomm_session, list);
- list_for_each(pp, &s->dlcs) {
+ list_for_each_entry(s, &session_list, list) {
+ struct rfcomm_dlc *d;
+ list_for_each_entry(d, &s->dlcs, list) {
struct sock *sk = s->sock->sk;
- struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
- seq_printf(f, "%s %s %ld %d %d %d %d\n",
- batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst),
- d->state, d->dlci, d->mtu,
- d->rx_credits, d->tx_credits);
+ seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
+ &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ d->state, d->dlci, d->mtu,
+ d->rx_credits, d->tx_credits);
}
}
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 297729c..a1f9450 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1,6 +1,5 @@
/*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
- Copyright (c) 2013 The Linux Foundation. All rights reserved.
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
@@ -26,27 +25,8 @@
* RFCOMM sockets.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -135,7 +115,7 @@
break;
}
- return node ? sk : NULL;
+ return sk ? sk : NULL;
}
/* Find socket with channel and source bdaddr.
@@ -165,7 +145,7 @@
read_unlock(&rfcomm_sk_list.lock);
- return node ? sk : sk1;
+ return sk ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
@@ -261,10 +241,13 @@
if (parent) {
sk->sk_type = parent->sk_type;
- pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
+ pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
+
+ security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
@@ -351,7 +334,7 @@
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr));
+ BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -368,7 +351,7 @@
goto done;
}
- write_lock_bh(&rfcomm_sk_list.lock);
+ write_lock(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
@@ -379,7 +362,7 @@
sk->sk_state = BT_BOUND;
}
- write_unlock_bh(&rfcomm_sk_list.lock);
+ write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
@@ -453,7 +436,7 @@
err = -EINVAL;
- write_lock_bh(&rfcomm_sk_list.lock);
+ write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
@@ -462,7 +445,7 @@
break;
}
- write_unlock_bh(&rfcomm_sk_list.lock);
+ write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
@@ -484,12 +467,7 @@
long timeo;
int err = 0;
- lock_sock(sk);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
@@ -502,28 +480,33 @@
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
+ nsk = bt_accept_dequeue(sk, newsock);
+ if (nsk)
+ break;
+
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -545,6 +528,7 @@
BT_DBG("sock %p, sk %p", sock, sk);
+ memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
@@ -598,6 +582,8 @@
break;
}
+ skb->priority = sk->sk_priority;
+
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
@@ -680,7 +666,8 @@
{
struct sock *sk = sock->sk;
struct bt_security sec;
- int len, err = 0;
+ int err = 0;
+ size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -708,13 +695,12 @@
break;
}
- if (sec.level > BT_SECURITY_VERY_HIGH) {
+ if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
rfcomm_pi(sk)->sec_level = sec.level;
- BT_DBG("set to %d", sec.level);
break;
case BT_DEFER_SETUP:
@@ -728,7 +714,11 @@
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+
break;
default:
@@ -743,8 +733,8 @@
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
- struct sock *l2cap_sk;
struct rfcomm_conninfo cinfo;
+ struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
@@ -765,7 +755,6 @@
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
break;
case BT_SECURITY_HIGH:
- case BT_SECURITY_VERY_HIGH:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
RFCOMM_LM_SECURE;
break;
@@ -788,10 +777,9 @@
break;
}
- l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
-
- cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
- memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = conn->hcon->handle;
+ memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
@@ -835,6 +823,7 @@
}
sec.level = rfcomm_pi(sk)->sec_level;
+ sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
@@ -848,7 +837,8 @@
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -954,6 +944,8 @@
if (!sk)
goto done;
+ bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
+
rfcomm_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, &src);
bacpy(&bt_sk(sk)->dst, &dst);
@@ -969,7 +961,7 @@
done:
bh_unlock_sock(parent);
- if (bt_sk(parent)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
@@ -980,16 +972,15 @@
struct sock *sk;
struct hlist_node *node;
- read_lock_bh(&rfcomm_sk_list.lock);
+ read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, node, &rfcomm_sk_list.head) {
- seq_printf(f, "%s %s %d %d\n",
- batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst),
- sk->sk_state, rfcomm_pi(sk)->channel);
+ seq_printf(f, "%pMR %pMR %d %d\n",
+ &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ sk->sk_state, rfcomm_pi(sk)->channel);
}
- read_unlock_bh(&rfcomm_sk_list.lock);
+ read_unlock(&rfcomm_sk_list.lock);
return 0;
}
@@ -1043,8 +1034,17 @@
return err;
err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("RFCOMM socket layer registration failed");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create RFCOMM proc file");
+ bt_sock_unregister(BTPROTO_RFCOMM);
+ goto error;
+ }
if (bt_debugfs) {
rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
@@ -1058,17 +1058,17 @@
return 0;
error:
- BT_ERR("RFCOMM socket layer registration failed");
proto_unregister(&rfcomm_proto);
return err;
}
void __exit rfcomm_cleanup_sockets(void)
{
+ bt_procfs_cleanup(&init_net, "rfcomm");
+
debugfs_remove(rfcomm_sock_debugfs);
- if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
- BT_ERR("RFCOMM socket layer unregistration failed");
+ bt_sock_unregister(BTPROTO_RFCOMM);
proto_unregister(&rfcomm_proto);
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index e18781c..aaf2eae 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -26,16 +26,11 @@
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <linux/capability.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/rfcomm.h>
@@ -48,13 +43,12 @@
static struct tty_driver *rfcomm_tty_driver;
struct rfcomm_dev {
+ struct tty_port port;
struct list_head list;
- atomic_t refcnt;
char name[12];
int id;
unsigned long flags;
- atomic_t opened;
int err;
bdaddr_t src;
@@ -64,9 +58,7 @@
uint modem_status;
struct rfcomm_dlc *dlc;
- struct tty_struct *tty;
wait_queue_head_t wait;
- struct tasklet_struct wakeup_task;
struct device *tty_dev;
@@ -76,17 +68,24 @@
};
static LIST_HEAD(rfcomm_dev_list);
-static DEFINE_RWLOCK(rfcomm_dev_lock);
+static DEFINE_SPINLOCK(rfcomm_dev_lock);
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(unsigned long arg);
-
/* ---- Device functions ---- */
-static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
+
+/*
+ * The reason this isn't actually a race, as you no doubt have a little voice
+ * screaming at you in your head, is that the refcount should never actually
+ * reach zero unless the device has already been taken off the list, in
+ * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in
+ * rfcomm_dev_destruct() anyway.
+ */
+static void rfcomm_dev_destruct(struct tty_port *port)
{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
struct rfcomm_dlc *dlc = dev->dlc;
BT_DBG("dev %p dlc %p", dev, dlc);
@@ -113,43 +112,26 @@
module_put(THIS_MODULE);
}
-static inline void rfcomm_dev_hold(struct rfcomm_dev *dev)
-{
- atomic_inc(&dev->refcnt);
-}
-
-static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
-{
- /* The reason this isn't actually a race, as you no
- doubt have a little voice screaming at you in your
- head, is that the refcount should never actually
- reach zero unless the device has already been taken
- off the list, in rfcomm_dev_del(). And if that's not
- true, we'll hit the BUG() in rfcomm_dev_destruct()
- anyway. */
- if (atomic_dec_and_test(&dev->refcnt))
- rfcomm_dev_destruct(dev);
-}
+static const struct tty_port_operations rfcomm_port_ops = {
+ .destruct = rfcomm_dev_destruct,
+};
static struct rfcomm_dev *__rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
- struct list_head *p;
- list_for_each(p, &rfcomm_dev_list) {
- dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list)
if (dev->id == id)
return dev;
- }
return NULL;
}
-static inline struct rfcomm_dev *rfcomm_dev_get(int id)
+static struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
- read_lock(&rfcomm_dev_lock);
+ spin_lock(&rfcomm_dev_lock);
dev = __rfcomm_dev_get(id);
@@ -157,10 +139,10 @@
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
dev = NULL;
else
- rfcomm_dev_hold(dev);
+ tty_port_get(&dev->port);
}
- read_unlock(&rfcomm_dev_lock);
+ spin_unlock(&rfcomm_dev_lock);
return dev;
}
@@ -184,7 +166,7 @@
static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
- return sprintf(buf, "%s\n", batostr(&dev->dst));
+ return sprintf(buf, "%pMR\n", &dev->dst);
}
static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
@@ -198,8 +180,8 @@
static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
{
- struct rfcomm_dev *dev;
- struct list_head *head = &rfcomm_dev_list, *p;
+ struct rfcomm_dev *dev, *entry;
+ struct list_head *head = &rfcomm_dev_list;
int err = 0;
BT_DBG("id %d channel %d", req->dev_id, req->channel);
@@ -208,24 +190,22 @@
if (!dev)
return -ENOMEM;
- write_lock_bh(&rfcomm_dev_lock);
+ spin_lock(&rfcomm_dev_lock);
if (req->dev_id < 0) {
dev->id = 0;
- list_for_each(p, &rfcomm_dev_list) {
- if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
+ if (entry->id != dev->id)
break;
dev->id++;
- head = p;
+ head = &entry->list;
}
} else {
dev->id = req->dev_id;
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
if (entry->id == dev->id) {
err = -EADDRINUSE;
goto out;
@@ -234,7 +214,7 @@
if (entry->id > dev->id - 1)
break;
- head = p;
+ head = &entry->list;
}
}
@@ -246,7 +226,6 @@
sprintf(dev->name, "rfcomm%d", dev->id);
list_add(&dev->list, head);
- atomic_set(&dev->refcnt, 1);
bacpy(&dev->src, &req->src);
bacpy(&dev->dst, &req->dst);
@@ -255,10 +234,9 @@
dev->flags = req->flags &
((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC));
- atomic_set(&dev->opened, 0);
-
+ tty_port_init(&dev->port);
+ dev->port.ops = &rfcomm_port_ops;
init_waitqueue_head(&dev->wait);
- tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
skb_queue_head_init(&dev->pending);
@@ -295,13 +273,13 @@
__module_get(THIS_MODULE);
out:
- write_unlock_bh(&rfcomm_dev_lock);
+ spin_unlock(&rfcomm_dev_lock);
if (err < 0)
goto free;
- dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL);
-
+ dev->tty_dev = tty_port_register_device(&dev->port, rfcomm_tty_driver,
+ dev->id, NULL);
if (IS_ERR(dev->tty_dev)) {
err = PTR_ERR(dev->tty_dev);
list_del(&dev->list);
@@ -325,18 +303,23 @@
static void rfcomm_dev_del(struct rfcomm_dev *dev)
{
+ unsigned long flags;
BT_DBG("dev %p", dev);
BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
- if (atomic_read(&dev->opened) > 0)
+ spin_lock_irqsave(&dev->port.lock, flags);
+ if (dev->port.count > 0) {
+ spin_unlock_irqrestore(&dev->port.lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&dev->port.lock, flags);
- write_lock_bh(&rfcomm_dev_lock);
+ spin_lock(&rfcomm_dev_lock);
list_del_init(&dev->list);
- write_unlock_bh(&rfcomm_dev_lock);
+ spin_unlock(&rfcomm_dev_lock);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
/* ---- Send buffer ---- */
@@ -350,15 +333,16 @@
static void rfcomm_wfree(struct sk_buff *skb)
{
struct rfcomm_dev *dev = (void *) skb->sk;
+ struct tty_struct *tty = dev->port.tty;
atomic_sub(skb->truesize, &dev->wmem_alloc);
- if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- tasklet_schedule(&dev->wakeup_task);
- rfcomm_dev_put(dev);
+ if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty)
+ tty_wakeup(tty);
+ tty_port_put(&dev->port);
}
-static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
+static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
{
- rfcomm_dev_hold(dev);
+ tty_port_get(&dev->port);
atomic_add(skb->truesize, &dev->wmem_alloc);
skb->sk = (void *) dev;
skb->destructor = rfcomm_wfree;
@@ -437,7 +421,7 @@
return -ENODEV;
if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) {
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return -EPERM;
}
@@ -445,20 +429,20 @@
rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */
- if (dev->tty)
- tty_vhangup(dev->tty);
+ if (dev->port.tty)
+ tty_vhangup(dev->port.tty);
if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return 0;
}
static int rfcomm_get_dev_list(void __user *arg)
{
+ struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- struct list_head *p;
int n = 0, size, err;
u16 dev_num;
@@ -472,16 +456,15 @@
size = sizeof(*dl) + dev_num * sizeof(*di);
- dl = kmalloc(size, GFP_KERNEL);
+ dl = kzalloc(size, GFP_KERNEL);
if (!dl)
return -ENOMEM;
di = dl->dev_info;
- read_lock_bh(&rfcomm_dev_lock);
+ spin_lock(&rfcomm_dev_lock);
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
continue;
(di + n)->id = dev->id;
@@ -494,7 +477,7 @@
break;
}
- read_unlock_bh(&rfcomm_dev_lock);
+ spin_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*di);
@@ -529,7 +512,7 @@
if (copy_to_user(arg, &di, sizeof(di)))
err = -EFAULT;
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return err;
}
@@ -558,23 +541,23 @@
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
{
struct rfcomm_dev *dev = dlc->owner;
- struct tty_struct *tty;
+ struct tty_port *port;
if (!dev) {
kfree_skb(skb);
return;
}
- tty = dev->tty;
- if (!tty || !skb_queue_empty(&dev->pending)) {
+ if (!skb_queue_empty(&dev->pending)) {
skb_queue_tail(&dev->pending, skb);
return;
}
- BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len);
+ BT_DBG("dlc %p len %d", dlc, skb->len);
- tty_insert_flip_string(tty, skb->data, skb->len);
- tty_flip_buffer_push(tty);
+ port = &dev->port;
+ tty_insert_flip_string(port->tty, skb->data, skb->len);
+ tty_flip_buffer_push(port->tty);
kfree_skb(skb);
}
@@ -591,13 +574,13 @@
wake_up_interruptible(&dev->wait);
if (dlc->state == BT_CLOSED) {
- if (!dev->tty) {
+ if (!dev->port.tty) {
if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
/* Drop DLC lock here to avoid deadlock
* 1. rfcomm_dev_get will take rfcomm_dev_lock
* but in rfcomm_dev_add there's lock order:
* rfcomm_dev_lock -> dlc lock
- * 2. rfcomm_dev_put will deadlock if it's
+ * 2. tty_port_put will deadlock if it's
* the last reference
*/
rfcomm_dlc_unlock(dlc);
@@ -607,11 +590,11 @@
}
rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
rfcomm_dlc_lock(dlc);
}
} else
- tty_hangup(dev->tty);
+ tty_hangup(dev->port.tty);
}
}
@@ -624,8 +607,8 @@
BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
- if (dev->tty && !C_CLOCAL(dev->tty))
- tty_hangup(dev->tty);
+ if (dev->port.tty && !C_CLOCAL(dev->port.tty))
+ tty_hangup(dev->port.tty);
}
dev->modem_status =
@@ -636,39 +619,27 @@
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
-{
- struct rfcomm_dev *dev = (void *) arg;
- struct tty_struct *tty = dev->tty;
- if (!tty)
- return;
-
- BT_DBG("dev %p tty %p", dev, tty);
- tty_wakeup(tty);
-}
-
static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
{
- struct tty_struct *tty = dev->tty;
struct sk_buff *skb;
+ struct tty_port *port;
int inserted = 0;
- if (!tty)
- return;
-
- BT_DBG("dev %p tty %p", dev, tty);
+ BT_DBG("dev %p", dev);
rfcomm_dlc_lock(dev->dlc);
+ port = &dev->port;
while ((skb = skb_dequeue(&dev->pending))) {
- inserted += tty_insert_flip_string(tty, skb->data, skb->len);
+ inserted += tty_insert_flip_string(port->tty, skb->data,
+ skb->len);
kfree_skb(skb);
}
rfcomm_dlc_unlock(dev->dlc);
if (inserted > 0)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port->tty);
}
static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
@@ -676,6 +647,7 @@
DECLARE_WAITQUEUE(wait, current);
struct rfcomm_dev *dev;
struct rfcomm_dlc *dlc;
+ unsigned long flags;
int err, id;
id = tty->index;
@@ -690,11 +662,15 @@
if (!dev)
return -ENODEV;
- BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
- dev->channel, atomic_read(&dev->opened));
+ BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+ dev->channel, dev->port.count);
- if (atomic_inc_return(&dev->opened) > 1)
+ spin_lock_irqsave(&dev->port.lock, flags);
+ if (++dev->port.count > 1) {
+ spin_unlock_irqrestore(&dev->port.lock, flags);
return 0;
+ }
+ spin_unlock_irqrestore(&dev->port.lock, flags);
dlc = dev->dlc;
@@ -702,7 +678,7 @@
rfcomm_dlc_lock(dlc);
tty->driver_data = dev;
- dev->tty = tty;
+ dev->port.tty = tty;
rfcomm_dlc_unlock(dlc);
set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
@@ -749,13 +725,17 @@
static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ unsigned long flags;
+
if (!dev)
return;
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
- atomic_read(&dev->opened));
+ dev->port.count);
- if (atomic_dec_and_test(&dev->opened)) {
+ spin_lock_irqsave(&dev->port.lock, flags);
+ if (!--dev->port.count) {
+ spin_unlock_irqrestore(&dev->port.lock, flags);
if (dev->tty_dev->parent)
device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
@@ -763,23 +743,23 @@
rfcomm_dlc_close(dev->dlc, 0);
clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- tasklet_kill(&dev->wakeup_task);
rfcomm_dlc_lock(dev->dlc);
tty->driver_data = NULL;
- dev->tty = NULL;
+ dev->port.tty = NULL;
rfcomm_dlc_unlock(dev->dlc);
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
- write_lock_bh(&rfcomm_dev_lock);
+ spin_lock(&rfcomm_dev_lock);
list_del_init(&dev->list);
- write_unlock_bh(&rfcomm_dev_lock);
+ spin_unlock(&rfcomm_dev_lock);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
- }
+ } else
+ spin_unlock_irqrestore(&dev->port.lock, flags);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -1088,7 +1068,7 @@
if (rfcomm_dev_get(dev->id) == NULL)
return;
rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
}
@@ -1156,11 +1136,12 @@
int __init rfcomm_init_ttys(void)
{
+ int error;
+
rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
if (!rfcomm_tty_driver)
- return -1;
+ return -ENOMEM;
- rfcomm_tty_driver->owner = THIS_MODULE;
rfcomm_tty_driver->driver_name = "rfcomm";
rfcomm_tty_driver->name = "rfcomm";
rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR;
@@ -1173,10 +1154,11 @@
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
- if (tty_register_driver(rfcomm_tty_driver)) {
+ error = tty_register_driver(rfcomm_tty_driver);
+ if (error) {
BT_ERR("Can't register RFCOMM TTY driver");
put_tty_driver(rfcomm_tty_driver);
- return -1;
+ return error;
}
BT_INFO("RFCOMM TTY layer initialized");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 3170190..8c40a8f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1,7 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright (c) 2011, The Linux Foundation. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -25,28 +24,9 @@
/* Bluetooth SCO sockets. */
-#include <linux/interrupt.h>
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/list.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -63,8 +43,6 @@
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err, u8 is_process);
-
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -97,15 +75,15 @@
}
/* ---- SCO connections ---- */
-static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
+static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
- if (conn || status)
+ if (conn)
return conn;
- conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
+ conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
@@ -127,7 +105,7 @@
return conn;
}
-static inline struct sock *sco_chan_get(struct sco_conn *conn)
+static struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
@@ -136,7 +114,7 @@
return sk;
}
-static int sco_conn_del(struct hci_conn *hcon, int err, u8 is_process)
+static int sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
@@ -149,16 +127,10 @@
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
- if (is_process)
- lock_sock(sk);
- else
- bh_lock_sock(sk);
+ bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
- if (is_process)
- release_sock(sk);
- else
- bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
sco_sock_kill(sk);
}
@@ -167,7 +139,8 @@
return 0;
}
-static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+ struct sock *parent)
{
int err = 0;
@@ -181,7 +154,7 @@
return err;
}
-static int sco_connect(struct sock *sk, __s8 is_wbs)
+static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -191,44 +164,31 @@
struct hci_dev *hdev;
int err, type;
- BT_DBG("%s -> %s", batostr(src), batostr(dst));
+ BT_DBG("%pMR -> %pMR", src, dst);
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- hdev->is_wbs = is_wbs;
-
- if (lmp_esco_capable(hdev) && !disable_esco) {
+ if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
- } else if (is_wbs) {
- return -ENAVAIL;
- } else {
+ else {
type = SCO_LINK;
pkt_type &= SCO_ESCO_MASK;
}
- BT_DBG("type: %d, pkt_type: 0x%x", type, pkt_type);
-
- hcon = hci_connect(hdev, type, pkt_type, dst,
- BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, pkt_type, dst, BDADDR_BREDR,
+ BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
- if (is_wbs && (hcon->type != ESCO_LINK)) {
- BT_ERR("WBS [ hcon->type: 0x%x, hcon->pkt_type: 0x%x ]",
- hcon->type, hcon->pkt_type);
- err = -EREMOTEIO;
- goto done;
- }
-
- conn = sco_conn_add(hcon, 0);
+ conn = sco_conn_add(hcon);
if (!conn) {
- hci_conn_put(hcon);
+ hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
@@ -249,16 +209,16 @@
}
done:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
- int err, count;
+ int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -266,23 +226,21 @@
BT_DBG("sk %p len %d", sk, len);
- count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_send_alloc(sk, count,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
- return count;
+ return len;
}
-static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
+static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
@@ -302,17 +260,20 @@
}
/* -------- Socket interface ---------- */
-static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
+static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
- sk_for_each(sk, node, &sco_sk_list.head)
+ sk_for_each(sk, node, &sco_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
+ continue;
+
if (!bacmp(&bt_sk(sk)->src, ba))
- goto found;
- sk = NULL;
-found:
- return sk;
+ return sk;
+ }
+
+ return NULL;
}
/* Find socket listening on source bdaddr.
@@ -340,7 +301,7 @@
read_unlock(&sco_sk_list.lock);
- return node ? sk : sk1;
+ return sk ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
@@ -394,17 +355,18 @@
case BT_CONNECTED:
case BT_CONFIG:
- if (sco_pi(sk)->conn) {
+ if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
if (sco_pi(sk)->conn->hcon != NULL) {
- hci_conn_put(sco_pi(sk)->conn->hcon);
+ hci_conn_drop(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
}
} else
sco_chan_del(sk, ECONNRESET);
break;
+ case BT_CONNECT2:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
@@ -430,8 +392,11 @@
{
BT_DBG("sk %p", sk);
- if (parent)
+ if (parent) {
sk->sk_type = parent->sk_type;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
+ security_sk_clone(parent, sk);
+ }
}
static struct proto sco_proto = {
@@ -491,10 +456,9 @@
{
struct sockaddr_sco sa;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa.sco_bdaddr;
int len, err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
+ BT_DBG("sk %p %pMR", sk, &sa.sco_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -510,18 +474,15 @@
goto done;
}
- write_lock_bh(&sco_sk_list.lock);
-
- if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
- sco_pi(sk)->pkt_type = sa.sco_pkt_type;
- sk->sk_state = BT_BOUND;
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
}
- write_unlock_bh(&sco_sk_list.lock);
+ bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
+
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -532,7 +493,7 @@
{
struct sock *sk = sock->sk;
struct sockaddr_sco sa;
- int len, err = 0;
+ int len, err;
BT_DBG("sk %p", sk);
@@ -559,12 +520,12 @@
bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
sco_pi(sk)->pkt_type = sa.sco_pkt_type;
- err = sco_connect(sk, sa.is_wbs);
+ err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
@@ -574,21 +535,38 @@
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ write_lock(&sco_sk_list.lock);
+
+ if (__sco_get_sock_listen_by_addr(src)) {
+ err = -EADDRINUSE;
+ goto unlock;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
sk->sk_state = BT_LISTEN;
+unlock:
+ write_unlock(&sco_sk_list.lock);
+
done:
release_sock(sk);
return err;
@@ -603,39 +581,39 @@
lock_sock(sk);
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(ch = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
+ ch = bt_accept_dequeue(sk, newsock);
+ if (ch)
+ break;
+
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -695,16 +673,93 @@
return err;
}
+static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->state = BT_CONFIG;
+
+ if (!lmp_esco_capable(hdev)) {
+ struct hci_cp_accept_conn_req cp;
+
+ bacpy(&cp.bdaddr, &conn->dst);
+
+ if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+ cp.role = 0x00; /* Become master */
+ else
+ cp.role = 0x01; /* Remain slave */
+
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
+ } else {
+ struct hci_cp_accept_sync_conn_req cp;
+
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
+ cp.content_format = cpu_to_le16(hdev->voice_setting);
+ cp.retrans_effort = 0xff;
+
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
+ sizeof(cp), &cp);
+ }
+}
+
+static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sco_pinfo *pi = sco_pi(sk);
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ sco_conn_defer_accept(pi->conn->hcon, 0);
+ sk->sk_state = BT_CONFIG;
+
+ release_sock(sk);
+ return 0;
+ }
+
+ release_sock(sk);
+
+ return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+}
+
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
+ u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -786,6 +841,19 @@
lock_sock(sk);
switch (optname) {
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -813,10 +881,7 @@
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
- else
- err = bt_sock_wait_state(sk, BT_CLOSED,
- SCO_DISCONN_TIMEOUT);
+ sk->sk_lingertime);
}
release_sock(sk);
return err;
@@ -838,11 +903,6 @@
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
- } else {
- lock_sock(sk);
- err = bt_sock_wait_state(sk, BT_CLOSED,
- SCO_DISCONN_TIMEOUT);
- release_sock(sk);
}
sock_orphan(sk);
@@ -878,7 +938,7 @@
sco_conn_unlock(conn);
if (conn->hcon)
- hci_conn_put(conn->hcon);
+ hci_conn_drop(conn->hcon);
}
sk->sk_state = BT_CLOSED;
@@ -895,8 +955,6 @@
BT_DBG("conn %p", conn);
- sco_conn_lock(conn);
-
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
@@ -904,17 +962,22 @@
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
+ sco_conn_lock(conn);
+
parent = sco_get_sock_listen(conn->src);
- if (!parent)
- goto done;
+ if (!parent) {
+ sco_conn_unlock(conn);
+ return;
+ }
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
- BTPROTO_SCO, GFP_ATOMIC);
+ BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
- goto done;
+ sco_conn_unlock(conn);
+ return;
}
sco_sock_init(sk, parent);
@@ -925,29 +988,28 @@
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
- sk->sk_state = BT_CONNECTED;
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
+ sk->sk_state = BT_CONNECT2;
+ else
+ sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
- }
-done:
- sco_conn_unlock(conn);
+ sco_conn_unlock(conn);
+ }
}
/* ----- SCO interface with lower layer (HCI) ----- */
-static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
- register struct sock *sk;
- struct hlist_node *node;
+ struct sock *sk;
int lm = 0;
+ struct hlist_node *node;
- if (type != SCO_LINK && type != ESCO_LINK)
- return 0;
-
- BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+ BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
@@ -956,8 +1018,11 @@
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
- !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+ !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
+
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
+ *flags |= HCI_PROTO_DEFER;
break;
}
}
@@ -966,38 +1031,27 @@
return lm;
}
-static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
- BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return -EINVAL;
-
+ BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
struct sco_conn *conn;
- conn = sco_conn_add(hcon, status);
+ conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
- sco_conn_del(hcon, bt_err(status), 0);
-
- return 0;
+ sco_conn_del(hcon, bt_to_errno(status));
}
-static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason, __u8 is_process)
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return -EINVAL;
-
- sco_conn_del(hcon, bt_err(reason), is_process);
-
- return 0;
+ sco_conn_del(hcon, bt_to_errno(reason));
}
-static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
@@ -1021,14 +1075,14 @@
struct sock *sk;
struct hlist_node *node;
- read_lock_bh(&sco_sk_list.lock);
+ read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
- seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst), sk->sk_state);
+ seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
+ &bt_sk(sk)->dst, sk->sk_state);
}
- read_unlock_bh(&sco_sk_list.lock);
+ read_unlock(&sco_sk_list.lock);
return 0;
}
@@ -1057,7 +1111,7 @@
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
- .recvmsg = bt_sock_recvmsg,
+ .recvmsg = sco_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
@@ -1073,15 +1127,6 @@
.create = sco_sock_create,
};
-static struct hci_proto sco_hci_proto = {
- .name = "SCO",
- .id = HCI_PROTO_SCO,
- .connect_ind = sco_connect_ind,
- .connect_cfm = sco_connect_cfm,
- .disconn_cfm = sco_disconn_cfm,
- .recv_scodata = sco_recv_scodata
-};
-
int __init sco_init(void)
{
int err;
@@ -1096,16 +1141,16 @@
goto error;
}
- err = hci_register_proto(&sco_hci_proto);
+ err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL);
if (err < 0) {
- BT_ERR("SCO protocol registration failed");
+ BT_ERR("Failed to create SCO proc file");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
if (bt_debugfs) {
- sco_debugfs = debugfs_create_file("sco", 0444,
- bt_debugfs, NULL, &sco_debugfs_fops);
+ sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+ NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
@@ -1121,13 +1166,11 @@
void __exit sco_exit(void)
{
+ bt_procfs_cleanup(&init_net, "sco");
+
debugfs_remove(sco_debugfs);
- if (bt_sock_unregister(BTPROTO_SCO) < 0)
- BT_ERR("SCO socket unregistration failed");
-
- if (hci_unregister_proto(&sco_hci_proto) < 0)
- BT_ERR("SCO protocol unregistration failed");
+ bt_sock_unregister(BTPROTO_SCO);
proto_unregister(&sco_proto);
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6d06582..b5562ab 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1,6 +1,5 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2013 The Linux Foundation. All rights reserved.
Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
This program is free software; you can redistribute it and/or modify
@@ -21,31 +20,19 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/b128ops.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
-#include <linux/crypto.h>
-#include <crypto/b128ops.h>
-#include <asm/unaligned.h>
-#define SMP_TIMEOUT 30000 /* 30 seconds */
+#define SMP_TIMEOUT msecs_to_jiffies(30000)
-#define SMP_MIN_CONN_INTERVAL 40 /* 50ms (40 * 1.25ms) */
-#define SMP_MAX_CONN_INTERVAL 56 /* 70ms (56 * 1.25ms) */
-#define SMP_MAX_CONN_LATENCY 0 /* 0ms (0 * 1.25ms) */
-#define SMP_SUPERVISION_TIMEOUT 500 /* 5 seconds (500 * 10ms) */
-
-#ifndef FALSE
-#define FALSE 0
-#define TRUE (!FALSE)
-#endif
-
-static int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+#define AUTH_REQ_MASK 0x07
static inline void swap128(u8 src[16], u8 dst[16])
{
@@ -163,7 +150,7 @@
}
static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
- u16 dlen, void *data)
+ u16 dlen, void *data)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
@@ -180,7 +167,7 @@
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(sizeof(code) + dlen);
- lh->cid = cpu_to_le16(L2CAP_CID_SMP);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_SMP);
memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
@@ -198,26 +185,28 @@
if (!skb)
return;
- hci_send_acl(conn->hcon, NULL, skb, 0);
+ skb->priority = HCI_PRIO_MAX;
+ hci_send_acl(conn->hchan, skb, 0);
+
+ cancel_delayed_work_sync(&conn->security_timer);
+ schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
}
static __u8 authreq_to_seclevel(__u8 authreq)
{
if (authreq & SMP_AUTH_MITM)
return BT_SECURITY_HIGH;
- else if (authreq & SMP_AUTH_BONDING)
- return BT_SECURITY_MEDIUM;
else
- return BT_SECURITY_LOW;
+ return BT_SECURITY_MEDIUM;
}
-static __u8 seclevel_to_authreq(__u8 level)
+static __u8 seclevel_to_authreq(__u8 sec_level)
{
- switch (level) {
- case BT_SECURITY_VERY_HIGH:
+ switch (sec_level) {
case BT_SECURITY_HIGH:
return SMP_AUTH_MITM | SMP_AUTH_BONDING;
-
+ case BT_SECURITY_MEDIUM:
+ return SMP_AUTH_BONDING;
default:
return SMP_AUTH_NONE;
}
@@ -228,412 +217,252 @@
struct smp_cmd_pairing *rsp,
__u8 authreq)
{
- struct hci_conn *hcon = conn->hcon;
- u8 all_keys = 0;
u8 dist_keys = 0;
- dist_keys = SMP_DIST_ENC_KEY;
- authreq |= SMP_AUTH_BONDING;
-
- BT_DBG("conn->hcon->io_capability:%d", conn->hcon->io_capability);
+ if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
+ dist_keys = SMP_DIST_ENC_KEY;
+ authreq |= SMP_AUTH_BONDING;
+ } else {
+ authreq &= ~SMP_AUTH_BONDING;
+ }
if (rsp == NULL) {
req->io_capability = conn->hcon->io_capability;
- req->oob_flag = hcon->oob ? SMP_OOB_PRESENT :
- SMP_OOB_NOT_PRESENT;
+ req->oob_flag = SMP_OOB_NOT_PRESENT;
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- req->init_key_dist = all_keys;
+ req->init_key_dist = 0;
req->resp_key_dist = dist_keys;
- req->auth_req = authreq;
- BT_DBG("SMP_CMD_PAIRING_REQ %d %d %d %d %2.2x %2.2x",
- req->io_capability, req->oob_flag,
- req->auth_req, req->max_key_size,
- req->init_key_dist, req->resp_key_dist);
+ req->auth_req = (authreq & AUTH_REQ_MASK);
return;
}
- /* Only request OOB if remote AND we support it */
- if (req->oob_flag)
- rsp->oob_flag = hcon->oob ? SMP_OOB_PRESENT :
- SMP_OOB_NOT_PRESENT;
- else
- rsp->oob_flag = SMP_OOB_NOT_PRESENT;
-
rsp->io_capability = conn->hcon->io_capability;
+ rsp->oob_flag = SMP_OOB_NOT_PRESENT;
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- rsp->init_key_dist = req->init_key_dist & all_keys;
+ rsp->init_key_dist = 0;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
- rsp->auth_req = authreq;
- BT_DBG("SMP_CMD_PAIRING_RSP %d %d %d %d %2.2x %2.2x",
- req->io_capability, req->oob_flag, req->auth_req,
- req->max_key_size, req->init_key_dist,
- req->resp_key_dist);
+ rsp->auth_req = (authreq & AUTH_REQ_MASK);
}
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
{
- struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp = conn->smp_chan;
if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
(max_key_size < SMP_MIN_ENC_KEY_SIZE))
return SMP_ENC_KEY_SIZE;
- hcon->smp_key_size = max_key_size;
+ smp->enc_key_size = max_key_size;
return 0;
}
-#define JUST_WORKS SMP_JUST_WORKS
-#define REQ_PASSKEY SMP_REQ_PASSKEY
-#define CFM_PASSKEY SMP_CFM_PASSKEY
-#define JUST_CFM SMP_JUST_CFM
-#define OVERLAP SMP_OVERLAP
-static const u8 gen_method[5][5] = {
- {JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY},
- {JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY},
- {CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY},
- {JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM},
- {CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP}
+static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
+{
+ struct hci_conn *hcon = conn->hcon;
+
+ if (send)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+ &reason);
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
+ mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
+ hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
+
+ cancel_delayed_work_sync(&conn->security_timer);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+ smp_chan_destroy(conn);
+}
+
+#define JUST_WORKS 0x00
+#define JUST_CFM 0x01
+#define REQ_PASSKEY 0x02
+#define CFM_PASSKEY 0x03
+#define REQ_OOB 0x04
+#define OVERLAP 0xFF
+
+static const u8 gen_method[5][5] = {
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
+ { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM },
+ { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
};
static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
u8 local_io, u8 remote_io)
{
struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp = conn->smp_chan;
u8 method;
u32 passkey = 0;
int ret = 0;
- /* Initialize key to JUST WORKS */
- memset(hcon->tk, 0, sizeof(hcon->tk));
- hcon->tk_valid = FALSE;
- hcon->auth = auth;
-
- /* By definition, OOB data will be used if both sides have it available
- */
- if (remote_oob && hcon->oob) {
- method = SMP_REQ_OOB;
- goto agent_request;
- }
+ /* Initialize key for JUST WORKS */
+ memset(smp->tk, 0, sizeof(smp->tk));
+ clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
/* If neither side wants MITM, use JUST WORKS */
- /* If either side has unknown io_caps, use JUST_WORKS */
+ /* If either side has unknown io_caps, use JUST WORKS */
+ /* Otherwise, look up method from the table */
if (!(auth & SMP_AUTH_MITM) ||
local_io > SMP_IO_KEYBOARD_DISPLAY ||
- remote_io > SMP_IO_KEYBOARD_DISPLAY) {
- hcon->auth &= ~SMP_AUTH_MITM;
- hcon->tk_valid = TRUE;
+ remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ method = JUST_WORKS;
+ else
+ method = gen_method[remote_io][local_io];
+
+ /* If not bonding, don't ask user to confirm a Zero TK */
+ if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
+ method = JUST_WORKS;
+
+ /* If Just Works, Continue with Zero TK */
+ if (method == JUST_WORKS) {
+ set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
return 0;
}
- /* MITM is now officially requested, but not required */
- /* Determine what we need (if anything) from the agent */
- method = gen_method[local_io][remote_io];
+ /* Not Just Works/Confirm results in MITM Authentication */
+ if (method != JUST_CFM)
+ set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags);
- BT_DBG("tk_method: %d", method);
-
- if (method == SMP_JUST_WORKS || method == SMP_JUST_CFM)
- hcon->auth &= ~SMP_AUTH_MITM;
-
- /* Don't bother confirming unbonded JUST_WORKS */
- if (!(auth & SMP_AUTH_BONDING) && method == SMP_JUST_CFM) {
- hcon->tk_valid = TRUE;
- return 0;
- } else if (method == SMP_JUST_WORKS) {
- hcon->tk_valid = TRUE;
- return 0;
- } else if (method == SMP_OVERLAP) {
+ /* If both devices have Keyboard-Display I/O, the master
+ * Confirms and the slave Enters the passkey.
+ */
+ if (method == OVERLAP) {
if (hcon->link_mode & HCI_LM_MASTER)
- method = SMP_CFM_PASSKEY;
+ method = CFM_PASSKEY;
else
- method = SMP_REQ_PASSKEY;
+ method = REQ_PASSKEY;
}
- BT_DBG("tk_method-2: %d", method);
-
- if (method == SMP_CFM_PASSKEY) {
+ /* Generate random passkey. Not valid until confirmed. */
+ if (method == CFM_PASSKEY) {
u8 key[16];
- /* Generate a passkey for display. It is not valid until
- * confirmed.
- */
+
memset(key, 0, sizeof(key));
get_random_bytes(&passkey, sizeof(passkey));
passkey %= 1000000;
put_unaligned_le32(passkey, key);
- swap128(key, hcon->tk);
+ swap128(key, smp->tk);
BT_DBG("PassKey: %d", passkey);
}
-agent_request:
hci_dev_lock(hcon->hdev);
- switch (method) {
- case SMP_REQ_PASSKEY:
- ret = mgmt_user_confirm_request(hcon->hdev->id,
- HCI_EV_USER_PASSKEY_REQUEST, conn->dst, 0);
- break;
- case SMP_CFM_PASSKEY:
- default:
- ret = mgmt_user_confirm_request(hcon->hdev->id,
- HCI_EV_USER_CONFIRM_REQUEST, conn->dst, passkey);
- break;
- }
+ if (method == REQ_PASSKEY)
+ ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
+ hcon->type, hcon->dst_type);
+ else
+ ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+ hcon->type, hcon->dst_type,
+ cpu_to_le32(passkey), 0);
hci_dev_unlock(hcon->hdev);
return ret;
}
-static int send_pairing_confirm(struct l2cap_conn *conn)
+static void confirm_work(struct work_struct *work)
{
- struct hci_conn *hcon = conn->hcon;
- struct crypto_blkcipher *tfm = hcon->hdev->tfm;
+ struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
+ struct l2cap_conn *conn = smp->conn;
+ struct crypto_blkcipher *tfm;
struct smp_cmd_pairing_confirm cp;
int ret;
- u8 res[16];
+ u8 res[16], reason;
+
+ BT_DBG("conn %p", conn);
+
+ tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ smp->tfm = tfm;
if (conn->hcon->out)
- ret = smp_c1(tfm, hcon->tk, hcon->prnd, hcon->preq, hcon->prsp,
- 0, conn->src, hcon->dst_type, conn->dst, res);
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
+ conn->src, conn->hcon->dst_type, conn->dst, res);
else
- ret = smp_c1(tfm, hcon->tk, hcon->prnd, hcon->preq, hcon->prsp,
- hcon->dst_type, conn->dst, 0, conn->src, res);
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+ conn->hcon->dst_type, conn->dst, 0, conn->src,
+ res);
+ if (ret) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
- if (ret)
- return SMP_CONFIRM_FAILED;
+ clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
swap128(res, cp.confirm_val);
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
- hcon->cfm_pending = FALSE;
+ return;
- smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
-
- return 0;
+error:
+ smp_failure(conn, reason, 1);
}
-int le_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, void *cp)
+static void random_work(struct work_struct *work)
{
- struct mgmt_cp_user_passkey_reply *psk_reply = cp;
- struct l2cap_conn *conn = hcon->smp_conn;
- u8 key[16];
- u8 reason = 0;
- int ret = 0;
-
- BT_DBG("");
-
- hcon->tk_valid = TRUE;
-
- switch (mgmt_op) {
- case MGMT_OP_USER_CONFIRM_NEG_REPLY:
- reason = SMP_CONFIRM_FAILED;
- break;
- case MGMT_OP_USER_CONFIRM_REPLY:
- break;
- case MGMT_OP_USER_PASSKEY_REPLY:
- memset(key, 0, sizeof(key));
- BT_DBG("PassKey: %d", psk_reply->passkey);
- put_unaligned_le32(psk_reply->passkey, key);
- swap128(key, hcon->tk);
- break;
- default:
- reason = SMP_CONFIRM_FAILED;
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (reason) {
- BT_DBG("smp_send_cmd: SMP_CMD_PAIRING_FAIL");
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
- &reason);
- del_timer(&hcon->smp_timer);
- if (hcon->disconn_cfm_cb)
- hcon->disconn_cfm_cb(hcon, SMP_UNSPECIFIED);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
- mgmt_auth_failed(hcon->hdev->id, conn->dst, reason);
- hci_conn_put(hcon);
- l2cap_conn_del(hcon, EACCES, 0);
- } else if (hcon->cfm_pending) {
- BT_DBG("send_pairing_confirm");
- ret = send_pairing_confirm(conn);
- }
-
- return ret;
-}
-
-static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
-{
+ struct smp_chan *smp = container_of(work, struct smp_chan, random);
+ struct l2cap_conn *conn = smp->conn;
struct hci_conn *hcon = conn->hcon;
- struct smp_cmd_pairing rsp, *req = (void *) skb->data;
- u8 key_size;
- u8 auth = SMP_AUTH_NONE;
+ struct crypto_blkcipher *tfm = smp->tfm;
+ u8 reason, confirm[16], res[16], key[16];
int ret;
- BT_DBG("conn %p", conn);
-
- hcon->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&hcon->preq[1], req, sizeof(*req));
- skb_pull(skb, sizeof(*req));
-
- if (req->oob_flag && hcon->oob) {
- /* By definition, OOB data pairing will have MITM protection */
- auth = req->auth_req | SMP_AUTH_MITM;
- } else if (req->auth_req & SMP_AUTH_BONDING) {
- /* We will attempt MITM for all Bonding attempts */
- auth = SMP_AUTH_BONDING | SMP_AUTH_MITM;
+ if (IS_ERR_OR_NULL(tfm)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
}
- /* We didn't start the pairing, so no requirements */
- build_pairing_cmd(conn, req, &rsp, auth);
-
- key_size = min(req->max_key_size, rsp.max_key_size);
- if (check_enc_key_size(conn, key_size))
- return SMP_ENC_KEY_SIZE;
-
- ret = smp_rand(hcon->prnd);
- if (ret)
- return SMP_UNSPECIFIED;
-
- /* Request setup of TK */
- ret = tk_request(conn, req->oob_flag, auth, rsp.io_capability,
- req->io_capability);
- if (ret)
- return SMP_UNSPECIFIED;
-
- hcon->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&hcon->prsp[1], &rsp, sizeof(rsp));
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
-
- mod_timer(&hcon->smp_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT));
-
- return 0;
-}
-
-static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
-{
- struct hci_conn *hcon = conn->hcon;
- struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
- u8 key_size, auth = SMP_AUTH_NONE;
- int ret;
-
- BT_DBG("conn %p", conn);
-
- skb_pull(skb, sizeof(*rsp));
-
- req = (void *) &hcon->preq[1];
-
- key_size = min(req->max_key_size, rsp->max_key_size);
- if (check_enc_key_size(conn, key_size))
- return SMP_ENC_KEY_SIZE;
-
- hcon->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&hcon->prsp[1], rsp, sizeof(*rsp));
-
- ret = smp_rand(hcon->prnd);
- if (ret)
- return SMP_UNSPECIFIED;
-
- if ((req->auth_req & SMP_AUTH_BONDING) &&
- (rsp->auth_req & SMP_AUTH_BONDING))
- auth = SMP_AUTH_BONDING;
-
- auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
-
- ret = tk_request(conn, req->oob_flag, auth, rsp->io_capability,
- req->io_capability);
- if (ret)
- return SMP_UNSPECIFIED;
-
- hcon->cfm_pending = TRUE;
-
- /* Can't compose response until we have been confirmed */
- if (!hcon->tk_valid)
- return 0;
-
- ret = send_pairing_confirm(conn);
- if (ret)
- return SMP_CONFIRM_FAILED;
-
- return 0;
-}
-
-static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
-{
- struct hci_conn *hcon = conn->hcon;
- int ret;
-
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
- memcpy(hcon->pcnf, skb->data, sizeof(hcon->pcnf));
- skb_pull(skb, sizeof(hcon->pcnf));
-
- if (conn->hcon->out) {
- u8 random[16];
-
- swap128(hcon->prnd, random);
- smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
- random);
- } else if (hcon->tk_valid) {
- ret = send_pairing_confirm(conn);
-
- if (ret)
- return SMP_CONFIRM_FAILED;
- } else
- hcon->cfm_pending = TRUE;
-
-
- mod_timer(&hcon->smp_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT));
-
- return 0;
-}
-
-static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
-{
- struct hci_conn *hcon = conn->hcon;
- struct crypto_blkcipher *tfm = hcon->hdev->tfm;
- int ret;
- u8 key[16], res[16], random[16], confirm[16];
-
- swap128(skb->data, random);
- skb_pull(skb, sizeof(random));
-
- if (conn->hcon->out)
- ret = smp_c1(tfm, hcon->tk, random, hcon->preq, hcon->prsp, 0,
- conn->src, hcon->dst_type, conn->dst,
- res);
+ if (hcon->out)
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
+ conn->src, hcon->dst_type, conn->dst, res);
else
- ret = smp_c1(tfm, hcon->tk, random, hcon->preq, hcon->prsp,
- hcon->dst_type, conn->dst, 0, conn->src,
- res);
- if (ret)
- return SMP_UNSPECIFIED;
-
- BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ hcon->dst_type, conn->dst, 0, conn->src, res);
+ if (ret) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
swap128(res, confirm);
- if (memcmp(hcon->pcnf, confirm, sizeof(hcon->pcnf)) != 0) {
+ if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
BT_ERR("Pairing failed (confirmation values mismatch)");
- return SMP_CONFIRM_FAILED;
+ reason = SMP_CONFIRM_FAILED;
+ goto error;
}
- if (conn->hcon->out) {
+ if (hcon->out) {
u8 stk[16], rand[8];
__le16 ediv;
memset(rand, 0, sizeof(rand));
ediv = 0;
- smp_s1(tfm, hcon->tk, random, hcon->prnd, key);
+ smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
swap128(key, stk);
- memset(stk + hcon->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - hcon->smp_key_size);
+ memset(stk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
hci_le_start_enc(hcon, ediv, rand, stk);
- hcon->enc_key_size = hcon->smp_key_size;
+ hcon->enc_key_size = smp->enc_key_size;
} else {
u8 stk[16], r[16], rand[8];
__le16 ediv;
@@ -641,159 +470,336 @@
memset(rand, 0, sizeof(rand));
ediv = 0;
- swap128(hcon->prnd, r);
+ swap128(smp->prnd, r);
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
- smp_s1(tfm, hcon->tk, hcon->prnd, random, key);
+ smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
swap128(key, stk);
- memset(stk + hcon->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - hcon->smp_key_size);
+ memset(stk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
- hci_add_ltk(conn->hcon->hdev, 0, conn->dst, hcon->dst_type,
- hcon->smp_key_size, hcon->auth, ediv, rand, stk);
+ hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
+ HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
+ ediv, rand);
}
- return 0;
+ return;
+
+error:
+ smp_failure(conn, reason, 1);
}
-static int smp_encrypt_link(struct hci_conn *hcon, struct link_key *key)
+static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
{
- struct key_master_id *master;
- u8 sec_level;
- u8 zerobuf[8];
+ struct smp_chan *smp;
- if (!hcon || !key || !key->data)
- return -EINVAL;
+ smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
+ if (!smp)
+ return NULL;
- memset(zerobuf, 0, sizeof(zerobuf));
+ INIT_WORK(&smp->confirm, confirm_work);
+ INIT_WORK(&smp->random, random_work);
- master = (void *) key->data;
+ smp->conn = conn;
+ conn->smp_chan = smp;
+ conn->hcon->smp_conn = conn;
- if (!master->ediv && !memcmp(master->rand, zerobuf, sizeof(zerobuf)))
- return -EINVAL;
+ hci_conn_hold(conn->hcon);
- hcon->enc_key_size = key->pin_len;
- hcon->sec_req = TRUE;
- sec_level = authreq_to_seclevel(key->auth);
+ return smp;
+}
- BT_DBG("cur %d, req: %d", hcon->sec_level, sec_level);
+void smp_chan_destroy(struct l2cap_conn *conn)
+{
+ struct smp_chan *smp = conn->smp_chan;
- if (sec_level > hcon->sec_level)
- hcon->pending_sec_level = sec_level;
+ BUG_ON(!smp);
+ if (smp->tfm)
+ crypto_free_blkcipher(smp->tfm);
- if (!(hcon->link_mode & HCI_LM_ENCRYPT))
- hci_conn_hold(hcon);
+ kfree(smp);
+ conn->smp_chan = NULL;
+ conn->hcon->smp_conn = NULL;
+ hci_conn_drop(conn->hcon);
+}
- hci_le_start_enc(hcon, master->ediv, master->rand, key->val);
+int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
+{
+ struct l2cap_conn *conn = hcon->smp_conn;
+ struct smp_chan *smp;
+ u32 value;
+ u8 key[16];
+
+ BT_DBG("");
+
+ if (!conn)
+ return -ENOTCONN;
+
+ smp = conn->smp_chan;
+
+ switch (mgmt_op) {
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ value = le32_to_cpu(passkey);
+ memset(key, 0, sizeof(key));
+ BT_DBG("PassKey: %d", value);
+ put_unaligned_le32(value, key);
+ swap128(key, smp->tk);
+ /* Fall Through */
+ case MGMT_OP_USER_CONFIRM_REPLY:
+ set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+ break;
+ case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
+ return 0;
+ default:
+ smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
+ return -EOPNOTSUPP;
+ }
+
+ /* If it is our turn to send Pairing Confirm, do so now */
+ if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags))
+ queue_work(hcon->hdev->workqueue, &smp->confirm);
return 0;
}
-static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct hci_conn *hcon = conn->hcon;
- struct smp_cmd_security_req *rp = (void *) skb->data;
- struct smp_cmd_pairing cp;
- struct link_key *key;
+ struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ struct smp_chan *smp;
+ u8 key_size;
+ u8 auth = SMP_AUTH_NONE;
+ int ret;
BT_DBG("conn %p", conn);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+ if (conn->hcon->link_mode & HCI_LM_MASTER)
+ return SMP_CMD_NOTSUPP;
+
+ if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+ smp = smp_chan_create(conn);
+ else
+ smp = conn->smp_chan;
+
+ if (!smp)
+ return SMP_UNSPECIFIED;
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], req, sizeof(*req));
+ skb_pull(skb, sizeof(*req));
+
+ /* We didn't start the pairing, so match remote */
+ if (req->auth_req & SMP_AUTH_BONDING)
+ auth = req->auth_req;
+
+ conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+
+ build_pairing_cmd(conn, req, &rsp, auth);
+
+ key_size = min(req->max_key_size, rsp.max_key_size);
+ if (check_enc_key_size(conn, key_size))
+ return SMP_ENC_KEY_SIZE;
+
+ ret = smp_rand(smp->prnd);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
+
+ /* Request setup of TK */
+ ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ return 0;
+}
+
+static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ u8 key_size, auth = SMP_AUTH_NONE;
+ int ret;
+
+ BT_DBG("conn %p", conn);
+
+ if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+ return SMP_CMD_NOTSUPP;
+
+ skb_pull(skb, sizeof(*rsp));
+
+ req = (void *) &smp->preq[1];
+
+ key_size = min(req->max_key_size, rsp->max_key_size);
+ if (check_enc_key_size(conn, key_size))
+ return SMP_ENC_KEY_SIZE;
+
+ ret = smp_rand(smp->prnd);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
+
+ if ((req->auth_req & SMP_AUTH_BONDING) &&
+ (rsp->auth_req & SMP_AUTH_BONDING))
+ auth = SMP_AUTH_BONDING;
+
+ auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
+
+ ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+
+ /* Can't compose response until we have been confirmed */
+ if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
return 0;
- key = hci_find_link_key_type(hcon->hdev, conn->dst, KEY_TYPE_LTK);
- if (key && ((key->auth & SMP_AUTH_MITM) ||
- !(rp->auth_req & SMP_AUTH_MITM))) {
+ queue_work(hdev->workqueue, &smp->confirm);
- if (smp_encrypt_link(hcon, key) < 0)
- goto invalid_key;
+ return 0;
+}
- return 0;
+static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+
+ BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+ memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
+ skb_pull(skb, sizeof(smp->pcnf));
+
+ if (conn->hcon->out) {
+ u8 random[16];
+
+ swap128(smp->prnd, random);
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
+ random);
+ } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
+ queue_work(hdev->workqueue, &smp->confirm);
+ } else {
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
}
-invalid_key:
- hcon->sec_req = FALSE;
+ return 0;
+}
- /* Switch to Pairing Connection Parameters */
- hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL, SMP_MAX_CONN_INTERVAL,
- SMP_MAX_CONN_LATENCY, SMP_SUPERVISION_TIMEOUT);
+static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ swap128(skb->data, smp->rrnd);
+ skb_pull(skb, sizeof(smp->rrnd));
+
+ queue_work(hdev->workqueue, &smp->random);
+
+ return 0;
+}
+
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
+{
+ struct smp_ltk *key;
+ struct hci_conn *hcon = conn->hcon;
+
+ key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
+ if (!key)
+ return 0;
+
+ if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+ return 0;
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
+ return 1;
+
+ hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
+ hcon->enc_key_size = key->enc_size;
+
+ return 1;
+}
+
+static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_security_req *rp = (void *) skb->data;
+ struct smp_cmd_pairing cp;
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp;
+
+ BT_DBG("conn %p", conn);
+
+ hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+
+ if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+ return 0;
+
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ return 0;
+
+ smp = smp_chan_create(conn);
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
- hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
- hcon->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&hcon->preq[1], &cp, sizeof(cp));
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- mod_timer(&hcon->smp_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT));
-
- set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
-
- hci_conn_hold(hcon);
-
return 0;
}
-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
{
- struct hci_conn *hcon = conn->hcon;
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct smp_chan *smp = conn->smp_chan;
__u8 authreq;
- BT_DBG("conn %p hcon %p %d req: %d",
- conn, hcon, hcon->sec_level, sec_level);
+ BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
- if (IS_ERR(hcon->hdev->tfm))
+ if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
return 1;
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
- return -EINPROGRESS;
-
if (sec_level == BT_SECURITY_LOW)
return 1;
-
if (hcon->sec_level >= sec_level)
return 1;
+ if (hcon->link_mode & HCI_LM_MASTER)
+ if (smp_ltk_encrypt(conn, sec_level))
+ goto done;
+
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ return 0;
+
+ smp = smp_chan_create(conn);
+ if (!smp)
+ return 1;
+
authreq = seclevel_to_authreq(sec_level);
- hcon->smp_conn = conn;
- hcon->pending_sec_level = sec_level;
- if (hcon->link_mode & HCI_LM_MASTER) {
- struct link_key *key;
-
- key = hci_find_link_key_type(hcon->hdev, conn->dst,
- KEY_TYPE_LTK);
-
- if (smp_encrypt_link(hcon, key) == 0)
- goto done;
- }
-
- hcon->sec_req = FALSE;
-
if (hcon->link_mode & HCI_LM_MASTER) {
struct smp_cmd_pairing cp;
- /* Switch to Pairing Connection Parameters */
- hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL,
- SMP_MAX_CONN_INTERVAL, SMP_MAX_CONN_LATENCY,
- SMP_SUPERVISION_TIMEOUT);
-
build_pairing_cmd(conn, &cp, NULL, authreq);
- hcon->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&hcon->preq[1], &cp, sizeof(cp));
-
- mod_timer(&hcon->smp_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- hci_conn_hold(hcon);
} else {
struct smp_cmd_security_req cp;
cp.auth_req = authreq;
@@ -801,95 +807,80 @@
}
done:
- set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+ hcon->pending_sec_level = sec_level;
return 0;
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct hci_conn *hcon = conn->hcon;
struct smp_cmd_encrypt_info *rp = (void *) skb->data;
- u8 rand[8];
- int err;
+ struct smp_chan *smp = conn->smp_chan;
skb_pull(skb, sizeof(*rp));
- BT_DBG("conn %p", conn);
-
- memset(rand, 0, sizeof(rand));
-
- err = hci_add_ltk(hcon->hdev, 0, conn->dst, hcon->dst_type,
- 0, 0, 0, rand, rp->ltk);
- if (err)
- return SMP_UNSPECIFIED;
+ memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
return 0;
}
static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct hci_conn *hcon = conn->hcon;
struct smp_cmd_master_ident *rp = (void *) skb->data;
- struct smp_cmd_pairing *paircmd = (void *) &hcon->prsp[1];
- struct link_key *key;
- u8 *keydist;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+ u8 authenticated;
skb_pull(skb, sizeof(*rp));
- key = hci_find_link_key_type(hcon->hdev, conn->dst, KEY_TYPE_LTK);
- if (key == NULL)
- return SMP_UNSPECIFIED;
-
- if (hcon->out)
- keydist = &paircmd->resp_key_dist;
- else
- keydist = &paircmd->init_key_dist;
-
- BT_DBG("keydist 0x%x", *keydist);
-
- hci_add_ltk(hcon->hdev, 1, conn->dst, hcon->dst_type,
- hcon->smp_key_size, hcon->auth, rp->ediv,
- rp->rand, key->val);
-
- *keydist &= ~SMP_DIST_ENC_KEY;
- if (hcon->out) {
- if (!(*keydist))
- smp_distribute_keys(conn, 1);
- }
+ hci_dev_lock(hdev);
+ authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
+ hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+ HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
+ rp->ediv, rp->rand);
+ smp_distribute_keys(conn, 1);
+ hci_dev_unlock(hdev);
return 0;
}
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct hci_conn *hcon = conn->hcon;
__u8 code = skb->data[0];
__u8 reason;
int err = 0;
- if (IS_ERR(hcon->hdev->tfm)) {
- err = PTR_ERR(hcon->hdev->tfm);
+ if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) {
+ err = -ENOTSUPP;
reason = SMP_PAIRING_NOTSUPP;
- BT_ERR("SMP_PAIRING_NOTSUPP %p", hcon->hdev->tfm);
goto done;
}
- hcon->smp_conn = conn;
skb_pull(skb, sizeof(code));
+ /*
+ * The SMP context must be initialized for all other PDUs except
+ * pairing and security requests. If we get any other PDU when
+ * not initialized simply disconnect (done if this function
+ * returns an error).
+ */
+ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+ !conn->smp_chan) {
+ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+
switch (code) {
case SMP_CMD_PAIRING_REQ:
reason = smp_cmd_pairing_req(conn, skb);
break;
case SMP_CMD_PAIRING_FAIL:
+ smp_failure(conn, skb->data[0], 0);
reason = 0;
err = -EPERM;
- del_timer(&hcon->smp_timer);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
- mgmt_auth_failed(hcon->hdev->id, conn->dst, skb->data[0]);
- hci_conn_put(hcon);
break;
case SMP_CMD_PAIRING_RSP:
@@ -932,40 +923,33 @@
}
done:
- if (reason) {
- BT_ERR("SMP_CMD_PAIRING_FAIL: %d", reason);
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
- &reason);
- del_timer(&hcon->smp_timer);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
- mgmt_auth_failed(hcon->hdev->id, conn->dst, reason);
- hci_conn_put(hcon);
- }
+ if (reason)
+ smp_failure(conn, reason, 1);
kfree_skb(skb);
return err;
}
-static int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
+int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
{
- struct hci_conn *hcon = conn->hcon;
struct smp_cmd_pairing *req, *rsp;
+ struct smp_chan *smp = conn->smp_chan;
__u8 *keydist;
BT_DBG("conn %p force %d", conn, force);
- if (IS_ERR(hcon->hdev->tfm))
- return PTR_ERR(hcon->hdev->tfm);
-
- rsp = (void *) &hcon->prsp[1];
-
- /* The responder sends its keys first */
- if (!force && hcon->out && (rsp->resp_key_dist & 0x07))
+ if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
return 0;
- req = (void *) &hcon->preq[1];
+ rsp = (void *) &smp->prsp[1];
- if (hcon->out) {
+ /* The responder sends its keys first */
+ if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
+ return 0;
+
+ req = (void *) &smp->preq[1];
+
+ if (conn->hcon->out) {
keydist = &rsp->init_key_dist;
*keydist &= req->init_key_dist;
} else {
@@ -979,6 +963,8 @@
if (*keydist & SMP_DIST_ENC_KEY) {
struct smp_cmd_encrypt_info enc;
struct smp_cmd_master_ident ident;
+ struct hci_conn *hcon = conn->hcon;
+ u8 authenticated;
__le16 ediv;
get_random_bytes(enc.ltk, sizeof(enc.ltk));
@@ -987,11 +973,12 @@
smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
- hci_add_ltk(hcon->hdev, 1, conn->dst, hcon->dst_type,
- hcon->smp_key_size, hcon->auth, ediv,
- ident.rand, enc.ltk);
+ authenticated = hcon->sec_level == BT_SECURITY_HIGH;
+ hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+ HCI_SMP_LTK_SLAVE, 1, authenticated,
+ enc.ltk, smp->enc_key_size, ediv, ident.rand);
- ident.ediv = cpu_to_le16(ediv);
+ ident.ediv = ediv;
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
@@ -1028,63 +1015,11 @@
*keydist &= ~SMP_DIST_SIGN;
}
- if (hcon->out) {
- if (hcon->disconn_cfm_cb)
- hcon->disconn_cfm_cb(hcon, 0);
- del_timer(&hcon->smp_timer);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
- hci_conn_put(hcon);
- } else if (rsp->resp_key_dist) {
- if (hcon->disconn_cfm_cb)
- hcon->disconn_cfm_cb(hcon, SMP_UNSPECIFIED);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
- mgmt_auth_failed(hcon->hdev->id, conn->dst, SMP_UNSPECIFIED);
- hci_conn_put(hcon);
+ if (conn->hcon->out || force) {
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
+ cancel_delayed_work_sync(&conn->security_timer);
+ smp_chan_destroy(conn);
}
return 0;
}
-
-void smp_conn_security_fail(struct l2cap_conn *conn, u8 code, u8 reason)
-{
- BT_DBG("smp: %d %d ", code, reason);
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-}
-
-int smp_link_encrypt_cmplt(struct l2cap_conn *conn, u8 status, u8 encrypt)
-{
- struct hci_conn *hcon = conn->hcon;
-
- BT_DBG("smp: %d %d %d", status, encrypt, hcon->sec_req);
-
- clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
-
- if (!status && encrypt && hcon->sec_level < hcon->pending_sec_level)
- hcon->sec_level = hcon->pending_sec_level;
-
- if (!status && encrypt && !hcon->sec_req)
- return smp_distribute_keys(conn, 0);
-
- /* Fall back to Pairing request if failed a Link Security request */
- else if (hcon->sec_req && (status || !encrypt))
- smp_conn_security(conn, hcon->pending_sec_level);
-
- hci_conn_put(hcon);
-
- return 0;
-}
-
-void smp_timeout(unsigned long arg)
-{
- struct l2cap_conn *conn = (void *) arg;
- u8 reason = SMP_UNSPECIFIED;
-
- BT_DBG("%p", conn);
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
- mgmt_auth_failed(conn->hcon->hdev->id, conn->dst, SMP_UNSPECIFIED);
- hci_conn_put(conn->hcon);
- //delete the l2cap connection
- l2cap_conn_del(conn->hcon, EACCES, 0);
-}