Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 144a8c8..117d3bf 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -598,6 +598,24 @@
# LAST -- dummy/emulated controller
#
+config USB_GADGET_MSM_72K
+ boolean "MSM 72K Device Controller"
+ depends on ARCH_MSM
+ select USB_GADGET_SELECTED
+ select USB_GADGET_DUALSPEED
+ help
+ USB gadget driver for Qualcomm MSM 72K architecture.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "msm72k" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_MSM_72K
+ tristate
+ depends on USB_GADGET_MSM_72K
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
config USB_GADGET_DUMMY_HCD
boolean "Dummy HCD (DEVELOPMENT)"
depends on USB=y || (USB=m && USB_GADGET=m)
@@ -1064,4 +1082,84 @@
endchoice
+config USB_CSW_HACK
+ boolean "USB Mass storage CSW hack feature"
+ default y
+ help
+ This CSW hack feature improves mass storage write performance by
+ sending the CSW (Command Status Wrapper) to the host before the
+ data is actually written to the storage media.
+
+config MODEM_SUPPORT
+ boolean "modem support in generic serial function driver"
+ depends on USB_G_ANDROID
+ default y
+ help
+ This feature enables modem functionality in the generic serial
+ function driver. It adds interrupt endpoint support to send modem
+ notifications to the host, CDC descriptors to enumerate the generic
+ serial port as a MODEM, and CDC class requests to configure MODEM
+ line settings.
+ Say "y" to enable MODEM support in the generic serial driver.
+
+config RMNET_SMD_CTL_CHANNEL
+ string "RMNET control SMD channel name"
+ depends on USB_G_ANDROID && MSM_SMD
+ default ""
+ help
+ Control SMD channel for transferring QMI messages
+
+config RMNET_SMD_DATA_CHANNEL
+ string "RMNET Data SMD channel name"
+ depends on USB_G_ANDROID && MSM_SMD
+ default ""
+ help
+ Data SMD channel for transferring network data
+
+config RMNET_SDIO_CTL_CHANNEL
+ int "RMNET control SDIO channel id"
+ default 8
+ depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+ help
+ Control SDIO channel for transferring RMNET QMI messages
+
+config RMNET_SDIO_DATA_CHANNEL
+ int "RMNET Data SDIO channel id"
+ default 8
+ depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+ help
+ Data SDIO channel for transferring network data
+
+config RMNET_SMD_SDIO_CTL_CHANNEL
+ int "RMNET(sdio_smd) Control SDIO channel id"
+ depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+ default 8
+ help
+ Control SDIO channel for transferring QMI messages
+
+config RMNET_SMD_SDIO_DATA_CHANNEL
+ int "RMNET(sdio_smd) Data SDIO channel id"
+ default 8
+ depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+ help
+ Data SDIO channel for transferring network data
+
+config RMNET_SDIO_SMD_DATA_CHANNEL
+ string "RMNET(sdio_smd) Data SMD channel name"
+ depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+ default "DATA40"
+ help
+ Data SMD channel for transferring network data
+
+config USB_ANDROID_RMNET_CTRL_SMD
+ boolean "RmNet(BAM) control over SMD driver"
+ depends on MSM_SMD
+ help
+ Enabling this option adds RmNet control over SMD support to the
+ android gadget. RmNet is an alternative to CDC-ECM and Windows
+ RNDIS. It uses the Qualcomm MSM Interface (QMI) for control
+ transfers. This option enables only the control interface; the
+ data interface uses BAM.
+
endif # USB_GADGET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index ab17a4c..064960c 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -29,6 +29,7 @@
mv_udc-y := mv_udc_core.o mv_udc_phy.o
obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
+obj-$(CONFIG_USB_MSM_72K) += msm72k_udc.o
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index b13633b..8146af7 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -30,6 +30,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/android.h>
#include "gadget_chips.h"
@@ -45,9 +46,10 @@
#include "epautoconf.c"
#include "composite.c"
+#include "f_diag.c"
#include "f_mass_storage.c"
-#include "u_serial.c"
-#include "f_acm.c"
+//#include "u_serial.c"
+//#include "f_acm.c"
#include "f_adb.c"
#include "f_mtp.c"
#include "f_accessory.c"
@@ -99,6 +101,7 @@
struct list_head enabled_functions;
struct usb_composite_dev *cdev;
struct device *dev;
+ struct android_usb_platform_data *pdata;
bool enabled;
bool connected;
@@ -187,6 +190,68 @@
/*-------------------------------------------------------------------------*/
/* Supported functions initialization */
+char diag_clients[32]; /* enabled DIAG clients - "diag[,diag_mdm]" */
+static ssize_t clients_store(
+ struct device *device, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ strncpy(diag_clients, buff, sizeof(diag_clients));
+
+ return size;
+}
+
+static DEVICE_ATTR(clients, S_IWUSR, NULL, clients_store);
+static struct device_attribute *diag_function_attributes[] =
+ { &dev_attr_clients, NULL };
+
+static int diag_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return diag_setup();
+}
+
+static void diag_function_cleanup(struct android_usb_function *f)
+{
+ diag_cleanup();
+}
+
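+/*
+ * Parse the comma-separated diag_clients list and add one diag function
+ * instance per client. Only the first channel is allowed to update the
+ * product id and serial number.
+ */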
+static int diag_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ char *name;
+ char buf[32], *b;
+ int once = 0, err = -1;
+ int (*notify)(uint32_t, const char *);
+
+ strncpy(buf, diag_clients, sizeof(buf));
+ b = strim(buf);
+
+ while (b) {
+ name = strsep(&b, ",");
+ /* Allow only first diag channel to update pid and serial no */
+ if (!once++)
+ notify = _android_dev->pdata->update_pid_and_serial_num;
+ else
+ notify = NULL;
+
+ if (name) {
+ err = diag_function_add(c, name, notify);
+ if (err)
+ pr_err("diag: Cannot open channel '%s'\n", name);
+ }
+ }
+
+ return err;
+}
+
+static struct android_usb_function diag_function = {
+ .name = "diag",
+ .init = diag_function_init,
+ .cleanup = diag_function_cleanup,
+ .bind_config = diag_function_bind_config,
+ .attributes = diag_function_attributes,
+};
+
static int adb_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
{
return adb_setup();
@@ -209,7 +274,7 @@
.bind_config = adb_function_bind_config,
};
-
+#if 0
#define MAX_ACM_INSTANCES 4
struct acm_function_config {
int instances;
@@ -280,7 +345,7 @@
.bind_config = acm_function_bind_config,
.attributes = acm_function_attributes,
};
-
+#endif
static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
{
@@ -644,8 +709,9 @@
static struct android_usb_function *supported_functions[] = {
+ &diag_function,
&adb_function,
- &acm_function,
+// &acm_function,
&mtp_function,
&ptp_function,
&rndis_function,
@@ -1104,6 +1170,19 @@
return 0;
}
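+/* Save the android_usb platform data so function drivers (e.g. diag) can use it */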
+static int __devinit android_probe(struct platform_device *pdev)
+{
+ struct android_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct android_dev *dev = _android_dev;
+
+ dev->pdata = pdata;
+
+ return 0;
+}
+
+static struct platform_driver android_platform_driver = {
+ .driver = { .name = "android_usb"},
+};
static int __init init(void)
{
@@ -1135,6 +1214,8 @@
composite_driver.setup = android_setup;
composite_driver.disconnect = android_disconnect;
+ platform_driver_probe(&android_platform_driver, android_probe);
+
return usb_composite_probe(&android_usb_driver, android_bind);
}
module_init(init);
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 139ac94..8a75420 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,11 +9,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
*/
#include <linux/module.h>
@@ -64,7 +59,8 @@
.flags = CI13XXX_REGS_SHARED |
CI13XXX_REQUIRE_TRANSCEIVER |
CI13XXX_PULLUP_ON_VBUS |
- CI13XXX_DISABLE_STREAMING,
+ CI13XXX_DISABLE_STREAMING |
+ CI13XXX_ZERO_ITC,
.notify_event = ci13xxx_msm_notify_event,
};
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index baaf87e..9a03ca7 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -318,6 +318,17 @@
hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM); /* HW >= 2.3 */
+ /*
+ * The ITC (Interrupt Threshold Control) field sets the maximum rate
+ * at which the device controller issues interrupts, i.e. the maximum
+ * interrupt interval measured in micro-frames. Valid values are 0,
+ * 1, 2, 4, 8, 16, 32 and 64; the default is 8 micro-frames. If the
+ * CPU can handle interrupts at a faster rate, ITC can be set to a
+ * smaller value to gain performance.
+ */
+ if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
+ hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
+
if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
pr_err("cannot enter in device mode");
pr_err("lpm = %i", hw_bank.lpm);
@@ -417,6 +428,10 @@
data |= ENDPTCTRL_RXE;
}
hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+
+ /* make sure endpoint is enabled before returning */
+ mb();
+
return 0;
}
@@ -1219,7 +1234,7 @@
{
struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
unsigned long flags;
- u32 dump[512];
+ u32 *dump;
unsigned i, k, n = 0;
dbg_trace("[%s] %p\n", __func__, buf);
@@ -1227,9 +1242,12 @@
dev_err(dev, "[%s] EINVAL\n", __func__);
return 0;
}
+ dump = kmalloc(2048, GFP_KERNEL);
+ if (dump == NULL)
+ return -ENOMEM;
spin_lock_irqsave(udc->lock, flags);
- k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
+ k = hw_register_read(dump, 512);
spin_unlock_irqrestore(udc->lock, flags);
for (i = 0; i < k; i++) {
@@ -1237,7 +1255,7 @@
"reg[0x%04X] = 0x%08X\n",
i * (unsigned)sizeof(u32), dump[i]);
}
-
+ kfree(dump);
return n;
}
@@ -1317,6 +1335,42 @@
}
static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+ struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ trace();
+
+ spin_lock_irqsave(udc->lock, flags);
+ if (!udc->remote_wakeup) {
+ ret = -EOPNOTSUPP;
+ dbg_trace("remote wakeup feature is not enabled\n");
+ goto out;
+ }
+ if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+ ret = -EINVAL;
+ dbg_trace("port is not suspended\n");
+ goto out;
+ }
+ hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+ spin_unlock_irqrestore(udc->lock, flags);
+ return ret;
+}
+
+static ssize_t usb_remote_wakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+
+ ci13xxx_wakeup(&udc->gadget);
+
+ return count;
+}
+static DEVICE_ATTR(wakeup, S_IWUSR, NULL, usb_remote_wakeup);
+
/**
* dbg_create_files: initializes the attribute interface
* @dev: device
@@ -1353,8 +1407,13 @@
retval = device_create_file(dev, &dev_attr_requests);
if (retval)
goto rm_registers;
+ retval = device_create_file(dev, &dev_attr_wakeup);
+ if (retval)
+ goto rm_remote_wakeup;
return 0;
+rm_remote_wakeup:
+ device_remove_file(dev, &dev_attr_wakeup);
rm_registers:
device_remove_file(dev, &dev_attr_registers);
rm_qheads:
@@ -1391,6 +1450,7 @@
device_remove_file(dev, &dev_attr_events);
device_remove_file(dev, &dev_attr_driver);
device_remove_file(dev, &dev_attr_device);
+ device_remove_file(dev, &dev_attr_wakeup);
return 0;
}
@@ -1619,6 +1679,7 @@
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->remote_wakeup = 0;
udc->suspended = 0;
+ udc->configured = 0;
spin_unlock_irqrestore(udc->lock, flags);
/* flush all endpoints */
@@ -1930,6 +1991,8 @@
do {
hw_test_and_set_setup_guard();
memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+ /* Ensure buffer is read before acknowledging to h/w */
+ mb();
} while (!hw_test_and_clear_setup_guard());
type = req.bRequestType;
@@ -1991,6 +2054,10 @@
break;
err = isr_setup_status_phase(udc);
break;
+ case USB_REQ_SET_CONFIGURATION:
+ if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
+ udc->configured = !!req.wValue;
+ goto delegate;
case USB_REQ_SET_FEATURE:
if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
le16_to_cpu(req.wValue) ==
@@ -2104,12 +2171,15 @@
else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
mEp->qh.ptr->cap &= ~QH_MULT;
else
- mEp->qh.ptr->cap &= ~QH_ZLT;
+ mEp->qh.ptr->cap |= QH_ZLT;
mEp->qh.ptr->cap |=
(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
+ /* complete all the updates to ept->head before enabling endpoint */
+ mb();
+
/*
* Enable endpoints in the HW other than ep0 as ep0
* is always enabled
@@ -2467,7 +2537,8 @@
if (is_active) {
pm_runtime_get_sync(&_gadget->dev);
hw_device_reset(udc);
- hw_device_state(udc->ep0out.qh.dma);
+ if (udc->softconnect)
+ hw_device_state(udc->ep0out.qh.dma);
} else {
hw_device_state(0);
if (udc->udc_driver->notify_event)
@@ -2481,31 +2552,6 @@
return 0;
}
-static int ci13xxx_wakeup(struct usb_gadget *_gadget)
-{
- struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
- unsigned long flags;
- int ret = 0;
-
- trace();
-
- spin_lock_irqsave(udc->lock, flags);
- if (!udc->remote_wakeup) {
- ret = -EOPNOTSUPP;
- dbg_trace("remote wakeup feature is not enabled\n");
- goto out;
- }
- if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
- ret = -EINVAL;
- dbg_trace("port is not suspended\n");
- goto out;
- }
- hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
-out:
- spin_unlock_irqrestore(udc->lock, flags);
- return ret;
-}
-
static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{
struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
@@ -2515,6 +2561,32 @@
return -ENOTSUPP;
}
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(udc->lock, flags);
+ udc->softconnect = is_active;
+ if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
+ !udc->vbus_active) || !udc->driver) {
+ spin_unlock_irqrestore(udc->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ if (is_active) {
+ hw_device_state(udc->ep0out.qh.dma);
+ } else {
+ hw_device_state(0);
+ if (udc->udc_driver->notify_event)
+ udc->udc_driver->notify_event(udc,
+ CI13XXX_CONTROLLER_STOPPED_EVENT);
+ }
+ return 0;
+}
+
+
/**
* Device operations part of the API to the USB controller hardware,
* which don't involve endpoints (or i/o)
@@ -2524,6 +2596,7 @@
.vbus_session = ci13xxx_vbus_session,
.wakeup = ci13xxx_wakeup,
.vbus_draw = ci13xxx_vbus_draw,
+ .pullup = ci13xxx_pullup,
};
/**
@@ -2627,6 +2700,7 @@
/* bind gadget */
driver->driver.bus = NULL;
udc->gadget.dev.driver = &driver->driver;
+ udc->softconnect = 1;
spin_unlock_irqrestore(udc->lock, flags);
retval = bind(&udc->gadget); /* MAY SLEEP */
@@ -2649,6 +2723,9 @@
}
}
+ if (!udc->softconnect)
+ goto done;
+
retval = hw_device_state(udc->ep0out.qh.dma);
if (retval)
pm_runtime_put_sync(&udc->gadget.dev);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 2370777..27af8aa 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -108,6 +108,7 @@
#define CI13XXX_REQUIRE_TRANSCEIVER BIT(1)
#define CI13XXX_PULLUP_ON_VBUS BIT(2)
#define CI13XXX_DISABLE_STREAMING BIT(3)
+#define CI13XXX_ZERO_ITC BIT(4)
#define CI13XXX_CONTROLLER_RESET_EVENT 0
#define CI13XXX_CONTROLLER_STOPPED_EVENT 1
@@ -131,11 +132,13 @@
u8 remote_wakeup; /* Is remote wakeup feature
enabled by the host? */
u8 suspended; /* suspended by the host */
+ u8 configured; /* is device configured */
u8 test_mode; /* the selected test mode */
struct usb_gadget_driver *driver; /* 3rd party gadget driver */
struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
int vbus_active; /* is VBUS active */
+ int softconnect; /* is pull-up enable allowed */
struct otg_transceiver *transceiver; /* Transceiver struct */
};
@@ -189,6 +192,8 @@
#define USBMODE_CM_HOST (0x03UL << 0)
#define USBMODE_SLOM BIT(3)
#define USBMODE_SDIS BIT(4)
+#define USBCMD_ITC(n) (n << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
+#define USBCMD_ITC_MASK (0xFF << 16)
/* ENDPTCTRL */
#define ENDPTCTRL_RXS BIT(0)
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 68b1a8e..3fd12b1 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -5,6 +5,7 @@
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
* Copyright (C) 2009 by Samsung Electronics
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
* Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
*
* This software is distributed under the terms of the GNU General
@@ -17,6 +18,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/usb/android_composite.h>
#include "u_serial.h"
#include "gadget_chips.h"
@@ -49,6 +51,7 @@
struct gserial port;
u8 ctrl_id, data_id;
u8 port_num;
+ enum transport_type transport;
u8 pending;
@@ -83,6 +86,17 @@
#define ACM_CTRL_DCD (1 << 0)
};
+static unsigned int no_tty_ports;
+static unsigned int no_sdio_ports;
+static unsigned int no_smd_ports;
+static unsigned int nr_ports;
+
+static struct port_info {
+ enum transport_type transport;
+ unsigned port_num;
+ unsigned client_port_num;
+} gacm_ports[GSERIAL_NO_PORTS];
+
static inline struct f_acm *func_to_acm(struct usb_function *f)
{
return container_of(f, struct f_acm, port.func);
@@ -93,6 +107,95 @@
return container_of(p, struct f_acm, port);
}
+static char *transport_to_str(enum transport_type t)
+{
+ switch (t) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ return "TTY";
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ return "SDIO";
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ return "SMD";
+ }
+
+ return "NONE";
+}
+
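+/* Set up the underlying serial ports for the configured transports (TTY, SDIO, SMD) */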
+static int gport_setup(struct usb_configuration *c)
+{
+ int ret = 0;
+
+ pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n",
+ __func__, no_tty_ports, no_sdio_ports, nr_ports);
+
+ if (no_tty_ports)
+ ret = gserial_setup(c->cdev->gadget, no_tty_ports);
+ if (no_sdio_ports)
+ ret = gsdio_setup(c->cdev->gadget, no_sdio_ports);
+ if (no_smd_ports)
+ ret = gsmd_setup(c->cdev->gadget, no_smd_ports);
+
+ return ret;
+}
+
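+/* Connect this ACM instance to its transport-specific backend port */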
+static int gport_connect(struct f_acm *acm)
+{
+ unsigned port_num;
+
+ port_num = gacm_ports[acm->port_num].client_port_num;
+
+
+ pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_port_no:%d\n",
+ __func__, transport_to_str(acm->transport),
+ acm, &acm->port, acm->port_num, port_num);
+
+ switch (acm->transport) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ gserial_connect(&acm->port, port_num);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ gsdio_connect(&acm->port, port_num);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ gsmd_connect(&acm->port, port_num);
+ break;
+ default:
+ pr_err("%s: Unsupported transport: %s\n", __func__,
+ transport_to_str(acm->transport));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int gport_disconnect(struct f_acm *acm)
+{
+ unsigned port_num;
+
+ port_num = gacm_ports[acm->port_num].client_port_num;
+
+ pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_pno:%d\n",
+ __func__, transport_to_str(acm->transport),
+ acm, &acm->port, acm->port_num, port_num);
+
+ switch (acm->transport) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ gserial_disconnect(&acm->port);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ gsdio_disconnect(&acm->port, port_num);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ gsmd_disconnect(&acm->port, port_num);
+ break;
+ default:
+ pr_err("%s: Unsupported transport: %s\n", __func__,
+ transport_to_str(acm->transport));
+ return -ENODEV;
+ }
+
+ return 0;
+}
/*-------------------------------------------------------------------------*/
/* notification endpoint uses smallish and infrequent fixed-size messages */
@@ -333,8 +436,7 @@
/* SET_LINE_CODING ... just read and save what the host sends */
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_LINE_CODING:
- if (w_length != sizeof(struct usb_cdc_line_coding)
- || w_index != acm->ctrl_id)
+ if (w_length != sizeof(struct usb_cdc_line_coding))
goto invalid;
value = w_length;
@@ -345,8 +447,6 @@
/* GET_LINE_CODING ... return what host sent, or initial value */
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_GET_LINE_CODING:
- if (w_index != acm->ctrl_id)
- goto invalid;
value = min_t(unsigned, w_length,
sizeof(struct usb_cdc_line_coding));
@@ -356,9 +456,6 @@
/* SET_CONTROL_LINE_STATE ... save what the host sent */
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
- if (w_index != acm->ctrl_id)
- goto invalid;
-
value = 0;
/* FIXME we should not allow data to flow until the
@@ -366,6 +463,12 @@
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
+ if (acm->port.notify_modem) {
+ unsigned port_num =
+ gacm_ports[acm->port_num].client_port_num;
+
+ acm->port.notify_modem(&acm->port, port_num, w_value);
+ }
break;
default:
@@ -415,7 +518,7 @@
} else if (intf == acm->data_id) {
if (acm->port.in->driver_data) {
DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
- gserial_disconnect(&acm->port);
+ gport_disconnect(acm);
} else {
DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
}
@@ -423,7 +526,7 @@
acm->hs.in, acm->fs.in);
acm->port.out_desc = ep_choose(cdev->gadget,
acm->hs.out, acm->fs.out);
- gserial_connect(&acm->port, acm->port_num);
+ gport_connect(acm);
} else
return -EINVAL;
@@ -437,7 +540,7 @@
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
- gserial_disconnect(&acm->port);
+ gport_disconnect(acm);
usb_ep_disable(acm->notify);
acm->notify->driver_data = NULL;
}
@@ -568,6 +671,15 @@
return acm_notify_serial_state(acm);
}
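+/* Forward modem control bits received from the transport to the host */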
+static int acm_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state = ctrl_bits;
+
+ return acm_notify_serial_state(acm);
+}
+
/*-------------------------------------------------------------------------*/
/* ACM function driver setup/binding */
@@ -764,12 +876,14 @@
spin_lock_init(&acm->lock);
acm->port_num = port_num;
+ acm->transport = gacm_ports[port_num].transport;
acm->port.connect = acm_connect;
acm->port.disconnect = acm_disconnect;
acm->port.send_break = acm_send_break;
+ acm->port.send_modem_ctrl_bits = acm_send_modem_ctrl_bits;
- acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num);
+ acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num + 1);
if (!acm->port.func.name) {
kfree(acm);
return -ENOMEM;
@@ -787,3 +901,117 @@
kfree(acm);
return status;
}
+
+#ifdef CONFIG_USB_ANDROID_ACM
+#include <linux/platform_device.h>
+
+static struct acm_platform_data *acm_pdata;
+
+static int acm_probe(struct platform_device *pdev)
+{
+ acm_pdata = pdev->dev.platform_data;
+ return 0;
+}
+
+static struct platform_driver acm_platform_driver = {
+ .driver = { .name = "acm", },
+ .probe = acm_probe,
+};
+
+int acm1_function_bind_config(struct usb_configuration *c)
+{
+ int ret = acm_bind_config(c, 0);
+ if (ret == 0)
+ gport_setup(c);
+ return ret;
+}
+
+int acm2_function_bind_config(struct usb_configuration *c)
+{
+ int ret = acm_bind_config(c, 1);
+
+ return ret;
+}
+
+static struct android_usb_function acm1_function = {
+ .name = "acm1",
+ .bind_config = acm1_function_bind_config,
+};
+
+static struct android_usb_function acm2_function = {
+ .name = "acm2",
+ .bind_config = acm2_function_bind_config,
+};
+
+static int facm_remove(struct platform_device *pdev)
+{
+ gserial_cleanup();
+
+ return 0;
+}
+
+static struct platform_driver usb_facm = {
+ .remove = facm_remove,
+ .driver = {
+ .name = "usb_facm",
+ .owner = THIS_MODULE,
+ },
+};
+
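+/*
+ * Parse the platform data, assign each ACM port to its transport, and
+ * register the android functions for the two ACM instances.
+ */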
+static int __init facm_probe(struct platform_device *pdev)
+{
+ struct usb_gadget_facm_pdata *pdata = pdev->dev.platform_data;
+ int i;
+
+ dev_dbg(&pdev->dev, "%s: probe\n", __func__);
+
+ if (!pdata)
+ goto probe_android_register;
+
+ for (i = 0; i < GSERIAL_NO_PORTS; i++) {
+ gacm_ports[i].transport = pdata->transport[i];
+ gacm_ports[i].port_num = i;
+
+ switch (gacm_ports[i].transport) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ gacm_ports[i].client_port_num = no_tty_ports;
+ no_tty_ports++;
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ gacm_ports[i].client_port_num = no_sdio_ports;
+ no_sdio_ports++;
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ gacm_ports[i].client_port_num = no_smd_ports;
+ no_smd_ports++;
+ break;
+ default:
+ pr_err("%s: Unsupported transport: %u\n",
+ __func__, gacm_ports[i].transport);
+ return -ENODEV;
+ }
+
+ nr_ports++;
+ }
+
+ pr_info("%s:gport:tty_ports:%u sdio_ports:%u "
+ "smd_ports:%u nr_ports:%u\n",
+ __func__, no_tty_ports, no_sdio_ports,
+ no_smd_ports, nr_ports);
+
+probe_android_register:
+ android_register_function(&acm1_function);
+ android_register_function(&acm2_function);
+
+ return 0;
+}
+
+static int __init init(void)
+{
+ printk(KERN_INFO "f_acm init\n");
+
+ return platform_driver_probe(&usb_facm, facm_probe);
+}
+module_init(init);
+
+#endif /* CONFIG_USB_ANDROID_ACM */
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
new file mode 100644
index 0000000..5366018
--- /dev/null
+++ b/drivers/usb/gadget/f_diag.c
@@ -0,0 +1,752 @@
+/* drivers/usb/gadget/f_diag.c
+ * Diag Function Device - Route ARM9 and ARM11 DIAG messages
+ * between HOST and DEVICE.
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/usbdiag.h>
+#include <mach/rpc_hsusb.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+static DEFINE_SPINLOCK(ch_lock);
+static LIST_HEAD(usb_diag_ch_list);
+
+static struct usb_interface_descriptor intf_desc = {
+ .bLength = sizeof intf_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0xFF,
+ .bInterfaceSubClass = 0xFF,
+ .bInterfaceProtocol = 0xFF,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .bInterval = 0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+ .bInterval = 0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .bInterval = 0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+ .bInterval = 0,
+};
+
+static struct usb_descriptor_header *fs_diag_desc[] = {
+ (struct usb_descriptor_header *) &intf_desc,
+ (struct usb_descriptor_header *) &fs_bulk_in_desc,
+ (struct usb_descriptor_header *) &fs_bulk_out_desc,
+ NULL,
+ };
+static struct usb_descriptor_header *hs_diag_desc[] = {
+ (struct usb_descriptor_header *) &intf_desc,
+ (struct usb_descriptor_header *) &hs_bulk_in_desc,
+ (struct usb_descriptor_header *) &hs_bulk_out_desc,
+ NULL,
+};
+
+/**
+ * struct diag_context - USB diag function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @in_desc: USB IN endpoint descriptor struct
+ * @out_desc: USB OUT endpoint descriptor struct
+ * @read_pool: List of requests used for Rx (OUT ep)
+ * @write_pool: List of requests used for Tx (IN ep)
+ * @config_work: Work item scheduled after the interface is configured to
+ * notify the CONNECT event to the diag char driver and to
+ * update the product id and serial number in the MODEM/IMEM.
+ * @lock: Spinlock to protect the read_pool and write_pool lists
+ * @cdev: USB composite device struct
+ * @ch: USB diag channel
+ *
+ */
+struct diag_context {
+ struct usb_function function;
+ struct usb_ep *out;
+ struct usb_ep *in;
+ struct usb_endpoint_descriptor *in_desc;
+ struct usb_endpoint_descriptor *out_desc;
+ struct list_head read_pool;
+ struct list_head write_pool;
+ struct work_struct config_work;
+ spinlock_t lock;
+ unsigned configured;
+ struct usb_composite_dev *cdev;
+ int (*update_pid_and_serial_num)(uint32_t, const char *);
+ struct usb_diag_ch ch;
+
+ /* pkt counters */
+ unsigned long dpkts_tolaptop;
+ unsigned long dpkts_tomodem;
+ unsigned dpkts_tolaptop_pending;
+};
+
+static inline struct diag_context *func_to_diag(struct usb_function *f)
+{
+ return container_of(f, struct diag_context, function);
+}
+
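+/*
+ * Scheduled when the interface is configured: notify the diag char driver
+ * of the CONNECT event and pass the product id and serial number on to
+ * the modem via update_pid_and_serial_num().
+ */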
+static void usb_config_work_func(struct work_struct *work)
+{
+ struct diag_context *ctxt = container_of(work,
+ struct diag_context, config_work);
+ struct usb_composite_dev *cdev = ctxt->cdev;
+ struct usb_gadget_strings *table;
+ struct usb_string *s;
+
+ if (ctxt->ch.notify)
+ ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_CONNECT, NULL);
+
+ if (!ctxt->update_pid_and_serial_num)
+ return;
+
+ /* pass on product id and serial number to dload */
+ if (!cdev->desc.iSerialNumber) {
+ ctxt->update_pid_and_serial_num(
+ cdev->desc.idProduct, 0);
+ return;
+ }
+
+ /*
+ * The serial number is filled in by the composite driver, so it
+ * is fair to assume that it will always be found in the first
+ * table of strings.
+ */
+ table = *(cdev->driver->strings);
+ for (s = table->strings; s && s->s; s++)
+ if (s->id == cdev->desc.iSerialNumber) {
+ ctxt->update_pid_and_serial_num(
+ cdev->desc.idProduct, s->s);
+ break;
+ }
+}
+
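+/*
+ * IN (Tx) completion handler: queue a zero-length packet when the transfer
+ * is a multiple of the max packet size, otherwise return the request to
+ * the write pool and notify WRITE_DONE.
+ */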
+static void diag_write_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct diag_context *ctxt = ep->driver_data;
+ struct diag_request *d_req = req->context;
+ unsigned long flags;
+
+ ctxt->dpkts_tolaptop_pending--;
+
+ if (!req->status) {
+ if ((req->length >= ep->maxpacket) &&
+ ((req->length % ep->maxpacket) == 0)) {
+ ctxt->dpkts_tolaptop_pending++;
+ req->length = 0;
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+ /* Queue zero length packet */
+ usb_ep_queue(ctxt->in, req, GFP_ATOMIC);
+ return;
+ }
+ }
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->write_pool);
+ if (req->length != 0) {
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+ }
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ if (ctxt->ch.notify)
+ ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_WRITE_DONE, d_req);
+}
+
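+/* OUT (Rx) completion handler: return the request to the read pool and notify READ_DONE */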
+static void diag_read_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct diag_context *ctxt = ep->driver_data;
+ struct diag_request *d_req = req->context;
+ unsigned long flags;
+
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->read_pool);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ ctxt->dpkts_tomodem++;
+
+ if (ctxt->ch.notify)
+ ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_READ_DONE, d_req);
+}
+
+/**
+ * usb_diag_open() - Open a diag channel over USB
+ * @name: Name of the channel
+ * @priv: Private structure pointer which will be passed in notify()
+ * @notify: Callback function to receive notifications
+ *
+ * This function iterates over the available channels and returns
+ * the channel handler if the name matches. The notify callback is called
+ * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
+ *
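+ * A minimal usage sketch (the callback and channel names are illustrative):
+ *
+ *   ch = usb_diag_open("diag", priv, my_notify);
+ *   if (IS_ERR(ch))
+ *           return PTR_ERR(ch);
+ *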
+ */
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+ void (*notify)(void *, unsigned, struct diag_request *))
+{
+ struct usb_diag_ch *ch;
+ struct diag_context *ctxt;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&ch_lock, flags);
+ /* Check if we already have a channel with this name */
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ if (!strcmp(name, ch->name)) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch_lock, flags);
+
+ if (!found) {
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return ERR_PTR(-ENOMEM);
+
+ ch = &ctxt->ch;
+ }
+
+ ch->name = name;
+ ch->priv = priv;
+ ch->notify = notify;
+
+ spin_lock_irqsave(&ch_lock, flags);
+ list_add_tail(&ch->list, &usb_diag_ch_list);
+ spin_unlock_irqrestore(&ch_lock, flags);
+
+ return ch;
+}
+EXPORT_SYMBOL(usb_diag_open);
+
+/**
+ * usb_diag_close() - Close a diag channel over USB
+ * @ch: Channel handler
+ *
+ * This function closes the diag channel.
+ *
+ */
+void usb_diag_close(struct usb_diag_ch *ch)
+{
+ struct diag_context *dev = container_of(ch, struct diag_context, ch);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch_lock, flags);
+ ch->priv = NULL;
+ ch->notify = NULL;
+ /* Free-up the resources if channel is no more active */
+ if (!ch->priv_usb) {
+ list_del(&ch->list);
+ kfree(dev);
+ }
+
+ spin_unlock_irqrestore(&ch_lock, flags);
+}
+EXPORT_SYMBOL(usb_diag_close);
+
+/**
+ * usb_diag_free_req() - Free USB requests
+ * @ch: Channel handler
+ *
+ * This function frees the read and write USB requests for the interface
+ * associated with this channel.
+ *
+ */
+void usb_diag_free_req(struct usb_diag_ch *ch)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ struct usb_request *req;
+ struct list_head *act, *tmp;
+
+ if (!ctxt)
+ return;
+
+ list_for_each_safe(act, tmp, &ctxt->write_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ctxt->in, req);
+ }
+
+ list_for_each_safe(act, tmp, &ctxt->read_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ctxt->out, req);
+ }
+}
+EXPORT_SYMBOL(usb_diag_free_req);
+
+/**
+ * usb_diag_alloc_req() - Allocate USB requests
+ * @ch: Channel handler
+ * @n_write: Number of requests for Tx
+ * @n_read: Number of requests for Rx
+ *
+ * This function allocates read and write USB requests for the interface
+ * associated with this channel. The actual buffers are not allocated
+ * here; they are passed in by the diag char driver.
+ *
+ */
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ struct usb_request *req;
+ int i;
+
+ if (!ctxt)
+ return -ENODEV;
+
+ for (i = 0; i < n_write; i++) {
+ req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
+ if (!req)
+ goto fail;
+ req->complete = diag_write_complete;
+ list_add_tail(&req->list, &ctxt->write_pool);
+ }
+
+ for (i = 0; i < n_read; i++) {
+ req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
+ if (!req)
+ goto fail;
+ req->complete = diag_read_complete;
+ list_add_tail(&req->list, &ctxt->read_pool);
+ }
+
+ return 0;
+
+fail:
+ usb_diag_free_req(ch);
+ return -ENOMEM;
+
+}
+EXPORT_SYMBOL(usb_diag_alloc_req);
+
+/**
+ * usb_diag_read() - Read data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on OUT endpoint of the interface corresponding to this
+ * channel. This function returns an appropriate error code when the
+ * interface is not in the configured state, no Rx requests are available,
+ * or queuing on the endpoint fails.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ *
+ */
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+
+ if (!ctxt->configured) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&ctxt->read_pool)) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+ if (usb_ep_queue(ctxt->out, req, GFP_ATOMIC)) {
+ /* On error, add the request back to the read pool */
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->read_pool);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ ERROR(ctxt->cdev, "%s: cannot queue"
+ " read request\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_diag_read);
+
+/**
+ * usb_diag_write() - Write data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on IN endpoint of the interface corresponding to this
+ * channel. This function returns an appropriate error code when the
+ * interface is not in the configured state, no Tx requests are available,
+ * or queuing on the endpoint fails.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ *
+ */
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+
+ if (!ctxt->configured) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&ctxt->write_pool)) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+ if (usb_ep_queue(ctxt->in, req, GFP_ATOMIC)) {
+ /* On error, add the request back to the write pool */
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->write_pool);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ ERROR(ctxt->cdev, "%s: cannot queue"
+ " write request\n", __func__);
+ return -EIO;
+ }
+
+ ctxt->dpkts_tolaptop++;
+ ctxt->dpkts_tolaptop_pending++;
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_diag_write);
+
+static void diag_function_disable(struct usb_function *f)
+{
+ struct diag_context *dev = func_to_diag(f);
+ unsigned long flags;
+
+ DBG(dev->cdev, "diag_function_disable\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->configured = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (dev->ch.notify)
+ dev->ch.notify(dev->ch.priv, USB_DIAG_DISCONNECT, NULL);
+
+ usb_ep_disable(dev->in);
+ dev->in->driver_data = NULL;
+
+ usb_ep_disable(dev->out);
+ dev->out->driver_data = NULL;
+
+}
+
+static int diag_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct diag_context *dev = func_to_diag(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ unsigned long flags;
+ struct usb_diag_ch *ch;
+ int rc = 0;
+
+ dev->in_desc = ep_choose(cdev->gadget,
+ &hs_bulk_in_desc, &fs_bulk_in_desc);
+ dev->out_desc = ep_choose(cdev->gadget,
+ &hs_bulk_out_desc, &fs_bulk_out_desc);
+ dev->in->driver_data = dev;
+ rc = usb_ep_enable(dev->in, dev->in_desc);
+ if (rc) {
+ ERROR(dev->cdev, "can't enable %s, result %d\n",
+ dev->in->name, rc);
+ return rc;
+ }
+ dev->out->driver_data = dev;
+ rc = usb_ep_enable(dev->out, dev->out_desc);
+ if (rc) {
+ ERROR(dev->cdev, "can't enable %s, result %d\n",
+ dev->out->name, rc);
+ usb_ep_disable(dev->in);
+ return rc;
+ }
+ schedule_work(&dev->config_work);
+
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ struct diag_context *ctxt;
+
+ ctxt = ch->priv_usb;
+ ctxt->dpkts_tolaptop = 0;
+ ctxt->dpkts_tomodem = 0;
+ ctxt->dpkts_tolaptop_pending = 0;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->configured = 1;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return rc;
+}
+
+static void diag_function_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct diag_context *ctxt = func_to_diag(f);
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+
+ usb_free_descriptors(f->descriptors);
+ ctxt->ch.priv_usb = NULL;
+}
+
+static int diag_function_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct diag_context *ctxt = func_to_diag(f);
+ struct usb_ep *ep;
+ int status = -ENODEV;
+
+ intf_desc.bInterfaceNumber = usb_interface_id(c, f);
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+ ctxt->in = ep;
+ ep->driver_data = ctxt;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+ ctxt->out = ep;
+ ep->driver_data = ctxt;
+
+ /* copy descriptors, and track endpoint copies */
+ f->descriptors = usb_copy_descriptors(fs_diag_desc);
+ if (!f->descriptors)
+ goto fail;
+
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ hs_bulk_in_desc.bEndpointAddress =
+ fs_bulk_in_desc.bEndpointAddress;
+ hs_bulk_out_desc.bEndpointAddress =
+ fs_bulk_out_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+ }
+ return 0;
+fail:
+ if (ctxt->out)
+ ctxt->out->driver_data = NULL;
+ if (ctxt->in)
+ ctxt->in->driver_data = NULL;
+ return status;
+
+}
+
+int diag_function_add(struct usb_configuration *c, const char *name,
+ int (*update_pid)(uint32_t, const char *))
+{
+ struct diag_context *dev;
+ struct usb_diag_ch *_ch;
+ int found = 0, ret;
+
+ DBG(c->cdev, "diag_function_add\n");
+
+ list_for_each_entry(_ch, &usb_diag_ch_list, list) {
+ if (!strcmp(name, _ch->name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ERROR(c->cdev, "unable to get diag usb channel\n");
+ return -ENODEV;
+ }
+
+ dev = container_of(_ch, struct diag_context, ch);
+ /* claim the channel for this USB interface */
+ _ch->priv_usb = dev;
+
+ dev->update_pid_and_serial_num = update_pid;
+ dev->cdev = c->cdev;
+ dev->function.name = _ch->name;
+ dev->function.descriptors = fs_diag_desc;
+ dev->function.hs_descriptors = hs_diag_desc;
+ dev->function.bind = diag_function_bind;
+ dev->function.unbind = diag_function_unbind;
+ dev->function.set_alt = diag_function_set_alt;
+ dev->function.disable = diag_function_disable;
+ spin_lock_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->read_pool);
+ INIT_LIST_HEAD(&dev->write_pool);
+ INIT_WORK(&dev->config_work, usb_config_work_func);
+
+ ret = usb_add_function(c, &dev->function);
+ if (ret) {
+ INFO(c->cdev, "usb_add_function failed\n");
+ _ch->priv_usb = NULL;
+ }
+
+ return ret;
+}
+
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = debug_buffer;
+ int temp = 0;
+ struct usb_diag_ch *ch;
+
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ struct diag_context *ctxt;
+
+ ctxt = ch->priv_usb;
+
+ temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+ "---Name: %s---\n"
+ "dpkts_tolaptop: %lu\n"
+ "dpkts_tomodem: %lu\n"
+ "pkts_tolaptop_pending: %u\n",
+ ch->name, ctxt->dpkts_tolaptop,
+ ctxt->dpkts_tomodem,
+ ctxt->dpkts_tolaptop_pending);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct usb_diag_ch *ch;
+
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ struct diag_context *ctxt;
+
+ ctxt = ch->priv_usb;
+
+ ctxt->dpkts_tolaptop = 0;
+ ctxt->dpkts_tomodem = 0;
+ ctxt->dpkts_tolaptop_pending = 0;
+ }
+
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations debug_fdiag_ops = {
+ .open = debug_open,
+ .read = debug_read_stats,
+ .write = debug_reset_stats,
+};
+
+static void fdiag_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("usb_diag", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("status", 0444, dent, 0, &debug_fdiag_ops);
+}
+#else
+static void fdiag_debugfs_init(void)
+{
+ return;
+}
+#endif
+
+static void diag_cleanup(void)
+{
+ struct diag_context *dev;
+ struct list_head *act, *tmp;
+ struct usb_diag_ch *_ch;
+ unsigned long flags;
+
+ list_for_each_safe(act, tmp, &usb_diag_ch_list) {
+ _ch = list_entry(act, struct usb_diag_ch, list);
+ dev = container_of(_ch, struct diag_context, ch);
+
+ spin_lock_irqsave(&ch_lock, flags);
+ /* Free if diagchar is not using the channel anymore */
+ if (!_ch->priv) {
+ list_del(&_ch->list);
+ kfree(dev);
+ }
+ spin_unlock_irqrestore(&ch_lock, flags);
+
+ }
+}
+
+static int diag_setup(void)
+{
+ fdiag_debugfs_init();
+
+ return 0;
+}
diff --git a/drivers/usb/gadget/f_diag.h b/drivers/usb/gadget/f_diag.h
new file mode 100644
index 0000000..82d9a25
--- /dev/null
+++ b/drivers/usb/gadget/f_diag.h
@@ -0,0 +1,24 @@
+/* drivers/usb/gadget/f_diag.h
+ *
+ * Diag Function Device - Route DIAG frames between SMD and USB
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __F_DIAG_H
+#define __F_DIAG_H
+
+int diag_function_add(struct usb_configuration *c, const char *name,
+ int (*update_pid)(uint32_t, const char *));
+
+#endif /* __F_DIAG_H */
+
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5440c6d..ccd9c2d 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -312,7 +312,10 @@
#include "storage_common.c"
-
+#ifdef CONFIG_USB_CSW_HACK
+static int write_error_after_csw_sent;
+static int csw_hack_sent;
+#endif
/*-------------------------------------------------------------------------*/
struct fsg_dev;
@@ -469,6 +472,7 @@
}
typedef void (*fsg_routine_t)(struct fsg_dev *);
+static int send_status(struct fsg_common *common);
static int exception_in_progress(struct fsg_common *common)
{
@@ -625,7 +629,7 @@
if (ctrl->bRequestType !=
(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != fsg->interface_number || w_value != 0)
+ if (w_value != 0)
return -EDOM;
/*
@@ -640,7 +644,7 @@
if (ctrl->bRequestType !=
(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != fsg->interface_number || w_value != 0)
+ if (w_value != 0)
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *)req->buf = fsg->common->nluns - 1;
@@ -881,6 +885,9 @@
ssize_t nwritten;
int rc;
+#ifdef CONFIG_USB_CSW_HACK
+ int i;
+#endif
if (curlun->ro) {
curlun->sense_data = SS_WRITE_PROTECTED;
return -EINVAL;
@@ -994,7 +1001,17 @@
bh = common->next_buffhd_to_drain;
if (bh->state == BUF_STATE_EMPTY && !get_some_more)
break; /* We stopped early */
+#ifdef CONFIG_USB_CSW_HACK
+ /*
+ * If the CSW packet has already been submitted to the hardware,
+ * its buffer is marked as full; by also checking the residue we
+ * make sure that this CSW packet is not written on to the
+ * storage media.
+ */
+ if (bh->state == BUF_STATE_FULL && common->residue) {
+#else
if (bh->state == BUF_STATE_FULL) {
+#endif
smp_rmb();
common->next_buffhd_to_drain = bh->next;
bh->state = BUF_STATE_EMPTY;
@@ -1045,9 +1062,36 @@
curlun->sense_data = SS_WRITE_ERROR;
curlun->sense_data_info = file_offset >> 9;
curlun->info_valid = 1;
+#ifdef CONFIG_USB_CSW_HACK
+ write_error_after_csw_sent = 1;
+ goto write_error;
+#endif
break;
}
+#ifdef CONFIG_USB_CSW_HACK
+write_error:
+ if ((nwritten == amount) && !csw_hack_sent) {
+ if (write_error_after_csw_sent)
+ break;
+ /*
+ * Check whether any of the buffers is in the busy state;
+ * if so, the complete data has not been received from the
+ * host yet, so there is no point in sending the CSW before
+ * the complete data.
+ */
+ for (i = 0; i < FSG_NUM_BUFFERS; i++) {
+ if (common->buffhds[i].state ==
+ BUF_STATE_BUSY)
+ break;
+ }
+ if (!amount_left_to_req && i == FSG_NUM_BUFFERS) {
+ csw_hack_sent = 1;
+ send_status(common);
+ }
+ }
+#endif
/* Did the host decide to stop early? */
if (bh->outreq->actual != bh->outreq->length) {
common->short_packet_received = 1;
@@ -1508,8 +1552,7 @@
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
-
- if (curlun->prevent_medium_removal && !prevent)
+ if (!curlun->nofua && curlun->prevent_medium_removal && !prevent)
fsg_lun_fsync_sub(curlun);
curlun->prevent_medium_removal = prevent;
return 0;
@@ -1790,6 +1833,19 @@
csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
csw->Tag = common->tag;
csw->Residue = cpu_to_le32(common->residue);
+#ifdef CONFIG_USB_CSW_HACK
+ /*
+ * Since the CSW is sent early, before writing on to the
+ * storage media, set the residue to zero, assuming that
+ * the write will succeed.
+ */
+ if (write_error_after_csw_sent) {
+ write_error_after_csw_sent = 0;
+ csw->Residue = cpu_to_le32(common->residue);
+ } else
+ csw->Residue = 0;
+#else
+ csw->Residue = cpu_to_le32(common->residue);
+#endif
csw->Status = status;
bh->inreq->length = USB_BULK_CS_WRAP_LEN;
@@ -2349,7 +2405,6 @@
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
- const struct usb_endpoint_descriptor *d;
struct fsg_dev *fsg;
int i, rc = 0;
@@ -2374,15 +2429,6 @@
}
}
- /* Disable the endpoints */
- if (fsg->bulk_in_enabled) {
- usb_ep_disable(fsg->bulk_in);
- fsg->bulk_in_enabled = 0;
- }
- if (fsg->bulk_out_enabled) {
- usb_ep_disable(fsg->bulk_out);
- fsg->bulk_out_enabled = 0;
- }
common->fsg = NULL;
wake_up(&common->fsg_wait);
@@ -2395,22 +2441,6 @@
common->fsg = new_fsg;
fsg = common->fsg;
- /* Enable the endpoints */
- d = fsg_ep_desc(common->gadget,
- &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
- rc = enable_endpoint(common, fsg->bulk_in, d);
- if (rc)
- goto reset;
- fsg->bulk_in_enabled = 1;
-
- d = fsg_ep_desc(common->gadget,
- &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
- rc = enable_endpoint(common, fsg->bulk_out, d);
- if (rc)
- goto reset;
- fsg->bulk_out_enabled = 1;
- common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
- clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
/* Allocate the requests */
for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
@@ -2440,6 +2470,29 @@
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
+ struct fsg_common *common = fsg->common;
+ const struct usb_endpoint_descriptor *d;
+ int rc;
+
+ /* Enable the endpoints */
+ d = fsg_ep_desc(common->gadget,
+ &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+ rc = enable_endpoint(common, fsg->bulk_in, d);
+ if (rc)
+ return rc;
+ fsg->bulk_in_enabled = 1;
+
+ d = fsg_ep_desc(common->gadget,
+ &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
+ rc = enable_endpoint(common, fsg->bulk_out, d);
+ if (rc) {
+ usb_ep_disable(fsg->bulk_in);
+ fsg->bulk_in_enabled = 0;
+ return rc;
+ }
+ fsg->bulk_out_enabled = 1;
+ common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+ clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
fsg->common->new_fsg = fsg;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
@@ -2448,6 +2501,18 @@
static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
+
+ /* Disable the endpoints */
+ if (fsg->bulk_in_enabled) {
+ usb_ep_disable(fsg->bulk_in);
+ fsg->bulk_in_enabled = 0;
+ fsg->bulk_in->driver_data = NULL;
+ }
+ if (fsg->bulk_out_enabled) {
+ usb_ep_disable(fsg->bulk_out);
+ fsg->bulk_out_enabled = 0;
+ fsg->bulk_out->driver_data = NULL;
+ }
fsg->common->new_fsg = NULL;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
}
@@ -2559,6 +2624,7 @@
*/
if (!fsg_is_set(common))
break;
+ common->ep0req->length = 0;
if (test_and_clear_bit(IGNORE_BULK_OUT,
&common->fsg->atomic_bitflags))
usb_ep_clear_halt(common->fsg->bulk_in);
@@ -2654,6 +2720,16 @@
common->state = FSG_STATE_STATUS_PHASE;
spin_unlock_irq(&common->lock);
+#ifdef CONFIG_USB_CSW_HACK
+ /*
+ * The status has already been sent for the write SCSI command,
+ * so skip sending it once again here.
+ */
+ if (csw_hack_sent) {
+ csw_hack_sent = 0;
+ continue;
+ }
+#endif
if (send_status(common))
continue;
@@ -2779,6 +2855,7 @@
curlun->ro = lcfg->cdrom || lcfg->ro;
curlun->initially_ro = curlun->ro;
curlun->removable = lcfg->removable;
+ curlun->nofua = lcfg->nofua;
curlun->dev.release = fsg_lun_release;
curlun->dev.parent = &gadget->dev;
/* curlun->dev.driver = &fsg_driver.driver; XXX */
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
new file mode 100644
index 0000000..770a225
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/android_composite.h>
+#include <linux/spinlock.h>
+
+#include <linux/platform_data/usb_rmnet.h>
+#include "u_rmnet.h"
+#include "gadget_chips.h"
+
+
+#define RMNET_NOTIFY_INTERVAL 5
+#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+struct rmnet_descs {
+ struct usb_endpoint_descriptor *in;
+ struct usb_endpoint_descriptor *out;
+ struct usb_endpoint_descriptor *notify;
+};
+
+#define ACM_CTRL_DTR (1 << 0)
+
+/* TODO: use separate structures for data and
+ * control paths
+ */
+struct f_rmnet {
+ struct grmnet port;
+ int ifc_id;
+ u8 port_num;
+ atomic_t online;
+ struct usb_composite_dev *cdev;
+
+ spinlock_t lock;
+
+ /* usb descriptors */
+ struct rmnet_descs fs;
+ struct rmnet_descs hs;
+
+ /* usb eps*/
+ struct usb_ep *notify;
+ struct usb_endpoint_descriptor *notify_desc;
+ struct usb_request *notify_req;
+
+ /* control info */
+ struct list_head cpkt_resp_q;
+ atomic_t notify_count;
+ unsigned long cpkts_len;
+};
+
+#define NR_PORTS 1
+static unsigned int nr_ports;
+static struct rmnet_ports {
+ unsigned port_num;
+ struct f_rmnet *port;
+#ifdef CONFIG_USB_ANDROID
+ struct android_usb_function android_f;
+#endif
+} ports[NR_PORTS];
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
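+/* Note on bInterval: at full speed the value is in 1 ms frames, so
+ * 1 << RMNET_NOTIFY_INTERVAL = 32 ms; at high speed it is an exponent in
+ * 125 us microframes, so RMNET_NOTIFY_INTERVAL + 4 = 9 gives
+ * 2^(9-1) * 125 us = 32 ms, keeping the notify polling period the same.
+ */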
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_out_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+ [0].s = "RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+ &rmnet_string_table,
+ NULL,
+};
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
+{
+ return container_of(f, struct f_rmnet, port.func);
+}
+
+static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
+{
+ return container_of(r, struct f_rmnet, port);
+}
+
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->length = len;
+
+ return req;
+}
+
+void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
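+/* Port plumbing: the control path is routed over SMD (gsmd_ctrl_*) while
+ * the data path is routed over BAM (gbam_*); the gport_* helpers below
+ * simply fan out to both.
+ */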
+static int gport_setup(int no_ports)
+{
+ int ret;
+
+ pr_debug("%s: no_ports:%d\n", __func__, no_ports);
+
+ ret = gbam_setup(no_ports);
+ if (ret)
+ return ret;
+
+ ret = gsmd_ctrl_setup(no_ports);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int gport_connect(struct f_rmnet *dev)
+{
+ int ret;
+
+ pr_debug("%s:dev:%p portno:%d\n",
+ __func__, dev, dev->port_num);
+
+ ret = gsmd_ctrl_connect(&dev->port, dev->port_num);
+ if (ret) {
+ pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = gbam_connect(&dev->port, dev->port_num);
+ if (ret) {
+ pr_err("%s: gbam_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gport_disconnect(struct f_rmnet *dev)
+{
+ pr_debug("%s:dev:%p portno:%d\n",
+ __func__, dev, dev->port_num);
+
+ gbam_disconnect(&dev->port, dev->port_num);
+
+ gsmd_ctrl_disconnect(&dev->port, dev->port_num);
+
+ return 0;
+}
+
+static int frmnet_remove(struct platform_device *dev)
+{
+ /* TBD:
+ * 1. Unregister android function
+ * 2. Free name from ports
+ * 3. Free rmnet device
+ * 4. Free Copy Descriptors
+ */
+ return 0;
+}
+
+static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+
+ pr_debug("%s: portno:%d\n", __func__, dev->port_num);
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ frmnet_free_req(dev->notify, dev->notify_req);
+
+ kfree(dev);
+}
+
+static void frmnet_disable(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+
+ pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+ usb_ep_disable(dev->notify);
+
+ atomic_set(&dev->online, 0);
+
+ gport_disconnect(dev);
+}
+
+static int
+frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret;
+
+ pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ if (dev->notify->driver_data) {
+ pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
+ usb_ep_disable(dev->notify);
+ }
+ dev->notify_desc = ep_choose(cdev->gadget,
+ dev->hs.notify,
+ dev->fs.notify);
+ ret = usb_ep_enable(dev->notify, dev->notify_desc);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, dev->notify->name, ret);
+ return ret;
+ }
+ dev->notify->driver_data = dev;
+
+ if (dev->port.in->driver_data) {
+ pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
+ gport_disconnect(dev);
+ }
+
+ dev->port.in_desc = ep_choose(cdev->gadget,
+ dev->hs.in, dev->fs.in);
+ dev->port.out_desc = ep_choose(cdev->gadget,
+ dev->hs.out, dev->fs.out);
+
+ ret = gport_connect(dev);
+
+ atomic_set(&dev->online, 1);
+
+ return ret;
+}
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!atomic_read(&dev->online) || !req || !req->buf) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
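+ /* Only the first pending response triggers an interrupt transfer;
+ * further responses are coalesced and drained by the completion
+ * handler re-queueing the notification.
+ */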
+ if (atomic_inc_return(&dev->notify_count) != 1) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ event = req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (ret) {
+ atomic_dec(&dev->notify_count);
+ pr_debug("ep enqueue error %d\n", ret);
+ }
+}
+
+static int
+frmnet_send_cpkt_response(struct grmnet *gr, struct rmnet_ctrl_pkt *cpkt)
+{
+ struct f_rmnet *dev;
+ unsigned long flags;
+
+ if (!gr || !cpkt) {
+ pr_err("%s: Invalid grmnet/cpkt, grmnet:%p cpkt:%p\n",
+ __func__, gr, cpkt);
+ return -ENODEV;
+ }
+
+ dev = port_to_rmnet(gr);
+
+ pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ if (!atomic_read(&dev->online)) {
+ rmnet_free_ctrl_pkt(cpkt);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add(&cpkt->list, &dev->cpkt_resp_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ frmnet_ctrl_response_available(dev);
+
+ return 0;
+}
+
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_rmnet *dev = req->context;
+ struct usb_composite_dev *cdev;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ if (!dev) {
+ pr_err("%s: rmnet dev is null\n", __func__);
+ return;
+ }
+
+ pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ cdev = dev->cdev;
+
+ cpkt = rmnet_alloc_ctrl_pkt(req->actual, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+ return;
+ }
+
+ memcpy(cpkt->buf, req->buf, req->actual);
+
+ if (dev->port.send_cpkt_request)
+ dev->port.send_cpkt_request(&dev->port, dev->port_num, cpkt);
+}
+
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_rmnet *dev = req->context;
+ int status = req->status;
+
+ pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&dev->notify_count, 0);
+ break;
+ default:
+ pr_err("rmnet notify ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
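+ /* Keep re-queueing RESPONSE_AVAILABLE until notify_count drops
+ * to zero, i.e. until the host has been told about every queued
+ * response.
+ */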
+ if (atomic_dec_and_test(&dev->notify_count))
+ break;
+
+ status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+ if (status) {
+ atomic_dec(&dev->notify_count);
+ pr_debug("ep enqueue error %d\n", status);
+ }
+ break;
+ }
+}
+
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = cdev->req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ int ret = -EOPNOTSUPP;
+
+ pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: usb cable is not connected\n", __func__);
+ return -ENOTCONN;
+ }
+
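+ /* Control (QMI) messages travel over EP0 using the CDC encapsulated
+ * command/response mechanism: SEND_ENCAPSULATED_COMMAND carries a
+ * request towards the modem, GET_ENCAPSULATED_RESPONSE drains one
+ * queued response back to the host.
+ */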
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ if (w_length > req->length)
+ goto invalid;
+ ret = w_length;
+ req->complete = frmnet_cmd_complete;
+ req->context = dev;
+ break;
+
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value)
+ goto invalid;
+ else {
+ unsigned len;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ spin_lock(&dev->lock);
+ if (list_empty(&dev->cpkt_resp_q)) {
+ pr_err("ctrl resp queue empty "
+ " req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ spin_unlock(&dev->lock);
+ goto invalid;
+ }
+
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&dev->lock);
+
+ len = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, len);
+ ret = len;
+
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ if (dev->port.send_cbits_tomodem)
+ dev->port.send_cbits_tomodem(&dev->port,
+ dev->port_num,
+ w_value);
+ ret = 0;
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (ret < w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_ep *ep;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+
+ dev->ifc_id = usb_interface_id(c, f);
+ if (dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d",
+ __func__, dev->ifc_id);
+ return dev->ifc_id;
+ }
+ rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+ if (!ep) {
+ pr_err("%s: usb epin autoconfig failed\n", __func__);
+ return -ENODEV;
+ }
+ dev->port.in = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+ if (!ep) {
+ pr_err("%s: usb epout autoconfig failed\n", __func__);
+ ret = -ENODEV;
+ goto ep_auto_out_fail;
+ }
+ dev->port.out = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+ ret = -ENODEV;
+ goto ep_auto_notify_fail;
+ }
+ dev->notify = ep;
+ ep->driver_data = cdev;
+
+ dev->notify_req = frmnet_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (IS_ERR(dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
+ __func__);
+ ret = -ENOMEM;
+ goto ep_notify_alloc_fail;
+ }
+
+ dev->notify_req->complete = frmnet_notify_complete;
+ dev->notify_req->context = dev;
+
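+ /* Copy the descriptor arrays and remember the per-speed endpoint
+ * descriptors so frmnet_set_alt() can pick FS or HS via ep_choose().
+ */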
+ f->descriptors = usb_copy_descriptors(rmnet_fs_function);
+
+ dev->fs.in = usb_find_endpoint(rmnet_fs_function,
+ f->descriptors,
+ &rmnet_fs_in_desc);
+ dev->fs.out = usb_find_endpoint(rmnet_fs_function,
+ f->descriptors,
+ &rmnet_fs_out_desc);
+ dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
+ f->descriptors,
+ &rmnet_fs_notify_desc);
+
+ if (gadget_is_dualspeed(cdev->gadget)) {
+ rmnet_hs_in_desc.bEndpointAddress =
+ rmnet_fs_in_desc.bEndpointAddress;
+ rmnet_hs_out_desc.bEndpointAddress =
+ rmnet_fs_out_desc.bEndpointAddress;
+ rmnet_hs_notify_desc.bEndpointAddress =
+ rmnet_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
+
+ dev->hs.in = usb_find_endpoint(rmnet_hs_function,
+ f->hs_descriptors, &rmnet_hs_in_desc);
+ dev->hs.out = usb_find_endpoint(rmnet_hs_function,
+ f->hs_descriptors, &rmnet_hs_out_desc);
+ dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
+ f->hs_descriptors, &rmnet_hs_notify_desc);
+ }
+
+ pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
+ __func__, dev->port_num,
+ gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+ dev->port.in->name, dev->port.out->name);
+
+ return 0;
+
+ep_notify_alloc_fail:
+ dev->notify->driver_data = NULL;
+ dev->notify = NULL;
+ep_auto_notify_fail:
+ dev->port.out->driver_data = NULL;
+ dev->port.out = NULL;
+ep_auto_out_fail:
+ dev->port.in->driver_data = NULL;
+ dev->port.in = NULL;
+
+ return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID
+static int frmnet_bind_config(struct usb_configuration *c)
+{
+ static unsigned portno;
+ int status;
+ struct f_rmnet *dev;
+ struct usb_function *f;
+ unsigned long flags;
+
+ pr_debug("%s: usb config:%p\n", __func__, c);
+
+ if (portno >= nr_ports) {
+ pr_err("%s: only %u ports supported, requested port:%u\n",
+ __func__, nr_ports, portno);
+ return -ENODEV;
+ }
+
+ if (rmnet_string_defs[0].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0) {
+ pr_err("%s: failed to get string id, err:%d\n",
+ __func__, status);
+ return status;
+ }
+ rmnet_string_defs[0].id = status;
+ }
+
+ dev = ports[portno].port;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->cdev = c->cdev;
+ f = &dev->port.func;
+ f->name = ports[portno].android_f.name;
+ portno++;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ f->strings = rmnet_strings;
+ f->bind = frmnet_bind;
+ f->unbind = frmnet_unbind;
+ f->disable = frmnet_disable;
+ f->set_alt = frmnet_set_alt;
+ f->setup = frmnet_setup;
+ dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+
+ status = usb_add_function(c, f);
+ if (status) {
+ pr_err("%s: usb add function failed: %d\n",
+ __func__, status);
+ kfree(ports[portno].android_f.name);
+ kfree(dev);
+ return status;
+ }
+
+ pr_debug("%s: complete\n", __func__);
+
+ return status;
+}
+
+static struct platform_driver usb_rmnet = {
+ .remove = frmnet_remove,
+ .driver = {
+ .name = "usb_rmnet",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __devinit frmnet_probe(struct platform_device *pdev)
+{
+ struct usb_rmnet_pdata *pdata = pdev->dev.platform_data;
+ int i;
+ struct f_rmnet *dev;
+ int ret;
+ int instances;
+
+ instances = 1;
+ if (pdata)
+ instances = pdata->num_instances;
+
+ pr_debug("%s: instances :%d\n", __func__, instances);
+
+ for (i = 0; i < instances; i++) {
+ dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+ if (!dev) {
+ pr_err("%s: Unable to allocate rmnet device\n",
+ __func__);
+ ret = -ENOMEM;
+ goto fail_probe;
+ }
+
+ dev->port_num = i;
+ spin_lock_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+ ports[i].port = dev;
+ ports[i].port_num = i;
+ ports[i].android_f.name = kasprintf(GFP_KERNEL, "rmnet%d", i);
+ ports[i].android_f.bind_config = frmnet_bind_config;
+
+ pr_debug("%s: android f_name:%s\n", __func__,
+ ports[i].android_f.name);
+
+ nr_ports++;
+
+ android_register_function(&ports[i].android_f);
+ }
+
+ gport_setup(nr_ports);
+
+ return 0;
+
+fail_probe:
+ for (i = 0; i < nr_ports; i++) {
+ /* android_unregister_function(&ports[i].android_f); */
+ kfree(ports[i].android_f.name);
+ kfree(ports[i].port);
+ }
+
+ return ret;
+}
+
+static int __init frmnet_init(void)
+{
+ return platform_driver_probe(&usb_rmnet, frmnet_probe);
+}
+module_init(frmnet_init);
+
+static void __exit frmnet_exit(void)
+{
+ platform_driver_unregister(&usb_rmnet);
+}
+module_exit(frmnet_exit);
+
+MODULE_DESCRIPTION("rmnet function driver");
+MODULE_LICENSE("GPL v2");
+#endif
diff --git a/drivers/usb/gadget/f_rmnet.h b/drivers/usb/gadget/f_rmnet.h
new file mode 100644
index 0000000..2d816c6
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __F_RMNET_H
+#define __F_RMNET_H
+
+int rmnet_function_add(struct usb_configuration *c);
+
+#endif /* __F_RMNET_H */
diff --git a/drivers/usb/gadget/f_rmnet_sdio.c b/drivers/usb/gadget/f_rmnet_sdio.c
new file mode 100644
index 0000000..aa8fd3a
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_sdio.c
@@ -0,0 +1,1314 @@
+/*
+ * f_rmnet_sdio.c -- RmNet SDIO function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/sdio_cmux.h>
+#include <mach/sdio_dmux.h>
+
+static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL;
+module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
+
+static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL;
+module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
+
+#define ACM_CTRL_DTR (1 << 0)
+
+#define SDIO_MUX_HDR 8
+#define RMNET_SDIO_NOTIFY_INTERVAL 5
+#define RMNET_SDIO_MAX_NFY_SZE sizeof(struct usb_cdc_notification)
+
+#define RMNET_SDIO_RX_REQ_MAX 16
+#define RMNET_SDIO_RX_REQ_SIZE 2048
+#define RMNET_SDIO_TX_REQ_MAX 200
+
+#define TX_PKT_DROP_THRESHOLD 1000
+#define RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000
+#define RX_PKT_FLOW_CTRL_DISABLE 500
+
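+/* Simple flow control: TX skbs coming from the SDIO mux are dropped once the
+ * USB TX queue exceeds tx_pkt_drop_thld; RX submissions towards the host are
+ * paused once dpkts_pending_atdmux crosses rx_fctrl_en_thld and resumed when
+ * it falls below rx_fctrl_dis_thld. All three thresholds are runtime-tunable
+ * module parameters.
+ */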
+unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
+module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
+module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+/* QMI request and response buffers */
+struct rmnet_sdio_qmi_buf {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+struct rmnet_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+
+ struct usb_ep *epout;
+ struct usb_ep *epin;
+ struct usb_ep *epnotify;
+ struct usb_request *notify_req;
+
+ u8 ifc_id;
+ /* QMI lists */
+ struct list_head qmi_req_q;
+ struct list_head qmi_resp_q;
+ /* Tx/Rx lists */
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_queue;
+
+ spinlock_t lock;
+ atomic_t online;
+ atomic_t notify_count;
+
+ struct workqueue_struct *wq;
+ struct work_struct disconnect_work;
+
+ struct work_struct ctl_rx_work;
+ struct work_struct data_rx_work;
+
+ struct delayed_work sdio_open_work;
+ atomic_t sdio_open;
+
+ unsigned int dpkts_pending_atdmux;
+ int cbits_to_modem;
+ struct work_struct set_modem_ctl_bits_work;
+
+ /* packet accounting: dpkt - data packets, cpkt - control packets */
+ unsigned long dpkt_tolaptop;
+ unsigned long dpkt_tomodem;
+ unsigned long tx_drp_cnt;
+ unsigned long cpkt_tolaptop;
+ unsigned long cpkt_tomodem;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+ .bInterval = 1 << RMNET_SDIO_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+ .bInterval = RMNET_SDIO_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_out_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+ [0].s = "QMI RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+ &rmnet_string_table,
+ NULL,
+};
+
+static struct rmnet_sdio_qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+
+{
+ struct rmnet_sdio_qmi_buf *qmi;
+
+ qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags);
+ if (qmi != NULL) {
+ qmi->buf = kmalloc(len, kmalloc_flags);
+ if (qmi->buf == NULL) {
+ kfree(qmi);
+ qmi = NULL;
+ }
+ }
+
+ return qmi ? qmi : ERR_PTR(-ENOMEM);
+}
+
+static void rmnet_free_qmi(struct rmnet_sdio_qmi_buf *qmi)
+{
+ kfree(qmi->buf);
+ kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer. Returns a pointer to the
+ * usb_request, or an ERR_PTR() value on failure.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+ if (len && req != NULL) {
+ req->length = len;
+ req->buf = kmalloc(len, kmalloc_flags);
+ if (req->buf == NULL) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+
+ return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&dev->notify_count, 0);
+ break;
+ default:
+ ERROR(cdev, "rmnet notifyep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+
+ /* handle multiple pending QMI_RESPONSE_AVAILABLE
+ * notifications by resending until we're done
+ */
+ if (atomic_dec_and_test(&dev->notify_count))
+ break;
+
+ status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+ if (status) {
+ atomic_dec(&dev->notify_count);
+ ERROR(cdev, "rmnet notify ep enq error %d\n", status);
+ }
+ break;
+ }
+}
+
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event = req->buf;
+ int status;
+
+ /* Response will be sent later */
+ if (atomic_inc_return(&dev->notify_count) != 1)
+ return;
+
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+
+ status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+ if (status < 0) {
+ atomic_dec(&dev->notify_count);
+ ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+ }
+}
+
+#define MAX_CTRL_PKT_SIZE 4096
+static void rmnet_ctl_receive_cb(void *data, int size, void *priv)
+{
+ struct rmnet_dev *dev = priv;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct rmnet_sdio_qmi_buf *qmi_resp;
+ unsigned long flags;
+
+ if (!data || !size)
+ return;
+
+ if (size > MAX_CTRL_PKT_SIZE) {
+ ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n",
+ size, MAX_CTRL_PKT_SIZE);
+ return;
+ }
+
+ if (!atomic_read(&dev->online)) {
+ DBG(cdev, "USB disconnected\n");
+ return;
+ }
+
+ qmi_resp = rmnet_alloc_qmi(size, GFP_KERNEL);
+ if (IS_ERR(qmi_resp)) {
+ DBG(cdev, "unable to allocate memory for QMI resp\n");
+ return;
+ }
+ memcpy(qmi_resp->buf, data, size);
+ qmi_resp->len = size;
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ qmi_response_available(dev);
+}
+
+static void rmnet_ctl_write_done(void *data, int size, void *priv)
+{
+ struct rmnet_dev *dev = priv;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ VDBG(cdev, "rmnet control write done = %d bytes\n", size);
+}
+
+static void rmnet_sts_callback(int id, void *priv)
+{
+ struct rmnet_dev *dev = priv;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "rmnet_sts_callback: id: %d\n", id);
+}
+
+static void rmnet_control_rx_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev = container_of(w, struct rmnet_dev, ctl_rx_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct rmnet_sdio_qmi_buf *qmi_req;
+ unsigned long flags;
+ int ret;
+
+ while (1) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(&dev->qmi_req_q))
+ goto unlock;
+
+ qmi_req = list_first_entry(&dev->qmi_req_q,
+ struct rmnet_sdio_qmi_buf, list);
+ list_del(&qmi_req->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf,
+ qmi_req->len);
+ if (ret != qmi_req->len) {
+ ERROR(cdev, "rmnet control SDIO write failed\n");
+ return;
+ }
+
+ dev->cpkt_tomodem++;
+
+ /*
+ * sdio_cmux_write() copies the buffer before handing it to
+ * sdio_al, so it is safe to free it here even though the
+ * write may not have completed yet.
+ */
+ rmnet_free_qmi(qmi_req);
+ }
+unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ switch (req->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case 0:
+ return;
+ default:
+ INFO(cdev, "rmnet %s response error %d, %d/%d\n",
+ ep->name, req->status,
+ req->actual, req->length);
+ }
+}
+
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct rmnet_sdio_qmi_buf *qmi_req;
+ int len = req->actual;
+
+ if (req->status < 0) {
+ ERROR(cdev, "rmnet command error %d\n", req->status);
+ return;
+ }
+
+ /* discard the packet if sdio is not available */
+ if (!atomic_read(&dev->sdio_open))
+ return;
+
+ qmi_req = rmnet_alloc_qmi(len, GFP_ATOMIC);
+ if (IS_ERR(qmi_req)) {
+ ERROR(cdev, "unable to allocate memory for QMI req\n");
+ return;
+ }
+ memcpy(qmi_req->buf, req->buf, len);
+ qmi_req->len = len;
+ spin_lock(&dev->lock);
+ list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+ spin_unlock(&dev->lock);
+ queue_work(dev->wq, &dev->ctl_rx_work);
+}
+
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int ret = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ struct rmnet_sdio_qmi_buf *resp;
+
+ if (!atomic_read(&dev->online))
+ return -ENOTCONN;
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ if (w_length > req->length)
+ goto invalid;
+ ret = w_length;
+ req->complete = rmnet_command_complete;
+ req->context = dev;
+ break;
+
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value)
+ goto invalid;
+ else {
+ unsigned len;
+
+ spin_lock(&dev->lock);
+
+ if (list_empty(&dev->qmi_resp_q)) {
+ INFO(cdev, "qmi resp empty "
+ " req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ spin_unlock(&dev->lock);
+ goto invalid;
+ }
+
+ resp = list_first_entry(&dev->qmi_resp_q,
+ struct rmnet_sdio_qmi_buf, list);
+ list_del(&resp->list);
+ spin_unlock(&dev->lock);
+
+ len = min_t(unsigned, w_length, resp->len);
+ memcpy(req->buf, resp->buf, len);
+ ret = len;
+ req->context = dev;
+ req->complete = rmnet_response_complete;
+ rmnet_free_qmi(resp);
+
+ /* TODO: check whether this is the right place to count this */
+ dev->cpkt_tolaptop++;
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ /* This is a workaround for RmNet and is borrowed from the
+ * CDC/ACM standard. The host driver will issue the above ACM
+ * standard request to the RmNet interface in the following
+ * scenario: Once the network adapter is disabled from device
+ * manager, the above request will be sent from the qcusbnet
+ * host driver, with DTR being '0'. Once network adapter is
+ * enabled from device manager (or during enumeration), the
+ * request will be sent with DTR being '1'.
+ */
+ if (w_value & ACM_CTRL_DTR)
+ dev->cbits_to_modem |= TIOCM_DTR;
+ else
+ dev->cbits_to_modem &= ~TIOCM_DTR;
+ queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+
+ ret = 0;
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (ret < w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int
+rmnet_rx_submit(struct rmnet_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+ struct sk_buff *skb;
+ int retval;
+
+ skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
+ if (skb == NULL)
+ return -ENOMEM;
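+ /* Reserve headroom for the SDIO mux header so the DMUX layer can
+ * prepend it without reallocating the skb.
+ */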
+ skb_reserve(skb, SDIO_MUX_HDR);
+
+ req->buf = skb->data;
+ req->length = RMNET_SDIO_RX_REQ_SIZE;
+ req->context = skb;
+
+ retval = usb_ep_queue(dev->epout, req, gfp_flags);
+ if (retval)
+ dev_kfree_skb_any(skb);
+
+ return retval;
+}
+
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status;
+ struct usb_request *req;
+ struct list_head *act, *tmp;
+ unsigned long flags;
+
+ if (!atomic_read(&dev->online)) {
+ pr_err("%s: USB not connected\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_for_each_safe(act, tmp, &dev->rx_idle) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ status = rmnet_rx_submit(dev, req, GFP_ATOMIC);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (status) {
+ ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+ list_add_tail(&req->list, &dev->rx_idle);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void usb_rmnet_sdio_start_tx(struct rmnet_dev *dev)
+{
+ unsigned long flags;
+ int status;
+ struct sk_buff *skb;
+ struct usb_request *req;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ if (!atomic_read(&dev->online))
+ return;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->tx_idle)) {
+ skb = __skb_dequeue(&dev->tx_skb_queue);
+ if (!skb) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+
+ list_del(&req->list);
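+ /* Release the lock across usb_ep_queue(): the completion handler
+ * also takes dev->lock and may run before usb_ep_queue() returns.
+ */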
+ spin_unlock(&dev->lock);
+ status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+ spin_lock(&dev->lock);
+ if (status) {
+ /* USB still online, queue requests back */
+ if (atomic_read(&dev->online)) {
+ ERROR(cdev, "rmnet tx data enqueue err %d\n",
+ status);
+ list_add_tail(&req->list, &dev->tx_idle);
+ __skb_queue_head(&dev->tx_skb_queue, skb);
+ } else {
+ req->buf = 0;
+ rmnet_free_req(dev->epin, req);
+ dev_kfree_skb_any(skb);
+ }
+ break;
+ }
+ dev->dpkt_tolaptop++;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_data_receive_cb(void *priv, struct sk_buff *skb)
+{
+ struct rmnet_dev *dev = priv;
+ unsigned long flags;
+
+ /* SDIO mux sends NULL SKB when link state changes */
+ if (!skb)
+ return;
+
+ if (!atomic_read(&dev->online)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->tx_skb_queue.qlen > tx_pkt_drop_thld) {
+ if (printk_ratelimit())
+ pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
+ __func__, dev->tx_drp_cnt);
+ dev->tx_drp_cnt++;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ __skb_queue_tail(&dev->tx_skb_queue, skb);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ usb_rmnet_sdio_start_tx(dev);
+}
+
+static void rmnet_data_write_done(void *priv, struct sk_buff *skb)
+{
+ struct rmnet_dev *dev = priv;
+
+ /* SDIO mux sends NULL SKB when link state changes */
+ if (!skb)
+ return;
+
+ dev_kfree_skb_any(skb);
+ /* This callback is invoked by the SDIO mux from within a
+ * spin_lock_irqsave() section, so interrupts are already disabled
+ * and a plain spin_lock() is sufficient here.
+ */
+ spin_lock(&dev->lock);
+ dev->dpkts_pending_atdmux--;
+
+ if (dev->dpkts_pending_atdmux >= rx_fctrl_dis_thld) {
+ spin_unlock(&dev->lock);
+ return;
+ }
+ spin_unlock(&dev->lock);
+
+ rmnet_start_rx(dev);
+}
+
+static void rmnet_data_rx_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev = container_of(w, struct rmnet_dev, data_rx_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct sk_buff *skb;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while ((skb = __skb_dequeue(&dev->rx_skb_queue))) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ret < 0) {
+ ERROR(cdev, "rmnet SDIO data write failed\n");
+ dev_kfree_skb_any(skb);
+ } else {
+ dev->dpkt_tomodem++;
+ dev->dpkts_pending_atdmux++;
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = ep->driver_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ dev_kfree_skb_any(skb);
+ req->buf = 0;
+ rmnet_free_req(ep, req);
+ return;
+ default:
+ /* unexpected failure */
+ ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+ ep->name, status,
+ req->actual, req->length);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ spin_lock(&dev->lock);
+ if (queue) {
+ __skb_queue_tail(&dev->rx_skb_queue, skb);
+ queue_work(dev->wq, &dev->data_rx_work);
+ }
+
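+ /* Hold the request back while the DMUX backlog is high; it is
+ * resubmitted from rmnet_data_write_done() once the backlog drains.
+ */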
+ if (dev->dpkts_pending_atdmux >= rx_fctrl_en_thld) {
+ list_add_tail(&req->list, &dev->rx_idle);
+ spin_unlock(&dev->lock);
+ return;
+ }
+ spin_unlock(&dev->lock);
+
+ status = rmnet_rx_submit(dev, req, GFP_ATOMIC);
+ if (status) {
+ ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+ list_add_tail(&req->list, &dev->rx_idle);
+ }
+}
+
+static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = ep->driver_data;
+ struct sk_buff *skb = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ break;
+ default:
+ ERROR(cdev, "rmnet data tx ep error %d\n", status);
+ break;
+ }
+
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &dev->tx_idle);
+ spin_unlock(&dev->lock);
+ dev_kfree_skb_any(skb);
+
+ usb_rmnet_sdio_start_tx(dev);
+}
+
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+ struct rmnet_sdio_qmi_buf *qmi;
+ struct usb_request *req;
+ struct list_head *act, *tmp;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ dev->dpkt_tolaptop = 0;
+ dev->dpkt_tomodem = 0;
+ dev->cpkt_tolaptop = 0;
+ dev->cpkt_tomodem = 0;
+ dev->dpkts_pending_atdmux = 0;
+ dev->tx_drp_cnt = 0;
+
+ /* free all usb requests in tx pool */
+ list_for_each_safe(act, tmp, &dev->tx_idle) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ req->buf = NULL;
+ rmnet_free_req(dev->epout, req);
+ }
+
+ /* free all usb requests in rx pool */
+ list_for_each_safe(act, tmp, &dev->rx_idle) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ req->buf = NULL;
+ rmnet_free_req(dev->epin, req);
+ }
+
+ /* free all buffers in qmi request pool */
+ list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+ qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
+ list_del(&qmi->list);
+ rmnet_free_qmi(qmi);
+ }
+
+ /* free all buffers in qmi request pool */
+ list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+ qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
+ list_del(&qmi->list);
+ rmnet_free_qmi(qmi);
+ }
+
+ while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
+ dev_kfree_skb_any(skb);
+
+ while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
+ dev_kfree_skb_any(skb);
+
+ rmnet_free_req(dev->epnotify, dev->notify_req);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_set_modem_ctl_bits_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev;
+
+ dev = container_of(w, struct rmnet_dev, set_modem_ctl_bits_work);
+
+ if (!atomic_read(&dev->sdio_open))
+ return;
+
+ pr_debug("%s: cbits_to_modem:%d\n",
+ __func__, dev->cbits_to_modem);
+
+ sdio_cmux_tiocmset(rmnet_sdio_ctl_ch,
+ dev->cbits_to_modem,
+ ~dev->cbits_to_modem);
+}
+
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+ /* REVISIT: push any pending data to SDIO */
+}
+static void rmnet_suspend(struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+ if (!atomic_read(&dev->online))
+ return;
+ /* This is a workaround for a Windows host bug during suspend.
+ * Windows 7/XP hosts are supposed to drop DTR when they suspend,
+ * but they do not, so drop DTR explicitly from the function
+ * driver's suspend handler.
+ */
+ dev->cbits_to_modem &= ~TIOCM_DTR;
+ queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+}
+static void rmnet_disable(struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+ if (!atomic_read(&dev->online))
+ return;
+
+ usb_ep_disable(dev->epnotify);
+ usb_ep_disable(dev->epout);
+ usb_ep_disable(dev->epin);
+
+ atomic_set(&dev->online, 0);
+ atomic_set(&dev->notify_count, 0);
+ rmnet_free_buf(dev);
+
+ /* cleanup work */
+ queue_work(dev->wq, &dev->disconnect_work);
+ dev->cbits_to_modem = 0;
+ queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+}
+
+#define SDIO_OPEN_RETRY_DELAY msecs_to_jiffies(2000)
+#define SDIO_OPEN_MAX_RETRY 90
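+/* Retry opening the SDIO control and data channels every 2 seconds for up
+ * to 90 attempts (~3 minutes); if only one channel ever opens, it is closed
+ * again once the retry budget is exhausted.
+ */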
+static void rmnet_open_sdio_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev =
+ container_of(w, struct rmnet_dev, sdio_open_work.work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret;
+ static int retry_cnt;
+ static bool ctl_ch_opened, data_ch_opened;
+
+ if (!ctl_ch_opened) {
+ /* Control channel for QMI messages */
+ ret = sdio_cmux_open(rmnet_sdio_ctl_ch, rmnet_ctl_receive_cb,
+ rmnet_ctl_write_done, rmnet_sts_callback, dev);
+ if (!ret)
+ ctl_ch_opened = true;
+ }
+ if (!data_ch_opened) {
+ /* Data channel for network packets */
+ ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
+ rmnet_data_receive_cb,
+ rmnet_data_write_done);
+ if (!ret)
+ data_ch_opened = true;
+ }
+
+ if (ctl_ch_opened && data_ch_opened) {
+ atomic_set(&dev->sdio_open, 1);
+
+ /* if usb cable is connected, update DTR status to modem */
+ if (atomic_read(&dev->online))
+ queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+
+ pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
+ __func__, retry_cnt);
+ return;
+ }
+
+ retry_cnt++;
+ pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n",
+ __func__, retry_cnt);
+
+ if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
+ if (!ctl_ch_opened)
+ ERROR(cdev, "Unable to open control SDIO channel\n");
+ else
+ sdio_cmux_close(rmnet_sdio_ctl_ch);
+
+ if (!data_ch_opened)
+ ERROR(cdev, "Unable to open DATA SDIO channel\n");
+ else
+ msm_sdio_dmux_close(rmnet_sdio_data_ch);
+
+ } else {
+ queue_delayed_work(dev->wq, &dev->sdio_open_work,
+ SDIO_OPEN_RETRY_DELAY);
+ }
+}
+
+static int rmnet_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int ret, i;
+
+ /* allocate notification */
+ dev->notify_req = rmnet_alloc_req(dev->epnotify,
+ RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
+
+ if (IS_ERR(dev->notify_req)) {
+ ret = PTR_ERR(dev->notify_req);
+ goto free_buf;
+ }
+ for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epout, 0, GFP_ATOMIC);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto free_buf;
+ }
+ req->complete = rmnet_complete_epout;
+ list_add_tail(&req->list, &dev->rx_idle);
+ }
+ for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epin, 0, GFP_ATOMIC);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto free_buf;
+ }
+ req->complete = rmnet_complete_epin;
+ list_add_tail(&req->list, &dev->tx_idle);
+ }
+
+ dev->notify_req->complete = rmnet_notify_complete;
+ dev->notify_req->context = dev;
+ dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
+
+ dev->epin->driver_data = dev;
+ usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+ &rmnet_hs_in_desc,
+ &rmnet_fs_in_desc));
+ dev->epout->driver_data = dev;
+ usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+ &rmnet_hs_out_desc,
+ &rmnet_fs_out_desc));
+ usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+ &rmnet_hs_notify_desc,
+ &rmnet_fs_notify_desc));
+
+ atomic_set(&dev->online, 1);
+
+ /* Queue Rx data requests */
+ rmnet_start_rx(dev);
+
+ return 0;
+
+free_buf:
+ rmnet_free_buf(dev);
+ dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+ return ret;
+}
+
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ int id;
+ struct usb_ep *ep;
+
+ dev->cdev = cdev;
+
+ /* allocate interface ID */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->ifc_id = id;
+ rmnet_interface_desc.bInterfaceNumber = id;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+ if (!ep)
+ goto out;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epin = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+ if (!ep)
+ goto out;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epout = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+ if (!ep)
+ goto out;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epnotify = ep;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ rmnet_hs_in_desc.bEndpointAddress =
+ rmnet_fs_in_desc.bEndpointAddress;
+ rmnet_hs_out_desc.bEndpointAddress =
+ rmnet_fs_out_desc.bEndpointAddress;
+ rmnet_hs_notify_desc.bEndpointAddress =
+ rmnet_fs_notify_desc.bEndpointAddress;
+ }
+
+ queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);
+
+ return 0;
+
+out:
+ if (dev->epnotify)
+ dev->epnotify->driver_data = NULL;
+ if (dev->epout)
+ dev->epout->driver_data = NULL;
+ if (dev->epin)
+ dev->epin->driver_data = NULL;
+
+ return -ENODEV;
+}
+
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+ destroy_workqueue(dev->wq);
+
+ rmnet_free_buf(dev);
+ dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+
+ msm_sdio_dmux_close(rmnet_sdio_data_ch);
+ sdio_cmux_close(rmnet_sdio_ctl_ch);
+
+ atomic_set(&dev->sdio_open, 0);
+
+ kfree(dev);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_dev *dev = file->private_data;
+ char *buf;
+ unsigned long flags;
+ int ret;
+
+ buf = kzalloc(sizeof(char) * 1024, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "dpkts_to_modem: %lu\n"
+ "dpkts_to_laptop: %lu\n"
+ "cpkts_to_modem: %lu\n"
+ "cpkts_to_laptop: %lu\n"
+ "cbits_to_modem: %d\n"
+ "tx skb size: %u\n"
+ "rx_skb_size: %u\n"
+ "dpkts_pending_at_dmux: %u\n"
+ "tx drp cnt: %lu\n"
+ "cbits_tomodem: %d",
+ dev->dpkt_tomodem, dev->dpkt_tolaptop,
+ dev->cpkt_tomodem, dev->cpkt_tolaptop,
+ dev->cbits_to_modem,
+ dev->tx_skb_queue.qlen, dev->rx_skb_queue.qlen,
+ dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
+ dev->cbits_to_modem);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_dev *dev = file->private_data;
+
+ dev->dpkt_tolaptop = 0;
+ dev->dpkt_tomodem = 0;
+ dev->cpkt_tolaptop = 0;
+ dev->cpkt_tomodem = 0;
+ dev->dpkts_pending_atdmux = 0;
+ dev->tx_drp_cnt = 0;
+
+ /* TBD: How do we reset skb qlen
+ * it might have side effects
+ */
+
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+const struct file_operations debug_stats_ops = {
+ .open = debug_open,
+ .read = debug_read_stats,
+ .write = debug_reset_stats,
+};
+
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("usb_rmnet", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("status", 0444, dent, dev, &debug_stats_ops);
+}
+#else
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+ return;
+}
+#endif
+
+int rmnet_sdio_function_add(struct usb_configuration *c)
+{
+ struct rmnet_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->wq = create_singlethread_workqueue("k_rmnet_work");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto free_dev;
+ }
+
+ spin_lock_init(&dev->lock);
+ atomic_set(&dev->notify_count, 0);
+ atomic_set(&dev->online, 0);
+
+ INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+ INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_set_modem_ctl_bits_work);
+
+ INIT_WORK(&dev->ctl_rx_work, rmnet_control_rx_work);
+ INIT_WORK(&dev->data_rx_work, rmnet_data_rx_work);
+
+ INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work);
+
+ INIT_LIST_HEAD(&dev->qmi_req_q);
+ INIT_LIST_HEAD(&dev->qmi_resp_q);
+
+ INIT_LIST_HEAD(&dev->rx_idle);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ skb_queue_head_init(&dev->tx_skb_queue);
+ skb_queue_head_init(&dev->rx_skb_queue);
+
+ dev->function.name = "rmnet_sdio";
+ dev->function.strings = rmnet_strings;
+ dev->function.descriptors = rmnet_fs_function;
+ dev->function.hs_descriptors = rmnet_hs_function;
+ dev->function.bind = rmnet_bind;
+ dev->function.unbind = rmnet_unbind;
+ dev->function.setup = rmnet_setup;
+ dev->function.set_alt = rmnet_set_alt;
+ dev->function.disable = rmnet_disable;
+ dev->function.suspend = rmnet_suspend;
+
+ ret = usb_add_function(c, &dev->function);
+ if (ret)
+ goto free_wq;
+
+ usb_debugfs_init(dev);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(dev->wq);
+free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET_SDIO
+static struct android_usb_function rmnet_function = {
+ .name = "rmnet_sdio",
+ .bind_config = rmnet_sdio_function_add,
+};
+
+static int __init rmnet_init(void)
+{
+ android_register_function(&rmnet_function);
+ return 0;
+}
+module_init(rmnet_init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET_SDIO */
diff --git a/drivers/usb/gadget/f_rmnet_smd.c b/drivers/usb/gadget/f_rmnet_smd.c
new file mode 100644
index 0000000..00925f9
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_smd.c
@@ -0,0 +1,1333 @@
+/*
+ * f_rmnet_smd.c -- RmNet SMD function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/msm_smd.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+
+#include "gadget_chips.h"
+
+static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
+module_param(rmnet_ctl_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");
+
+static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
+module_param(rmnet_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
+
+#define ACM_CTRL_DTR (1 << 0)
+
+#define RMNET_NOTIFY_INTERVAL 5
+#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+#define QMI_REQ_MAX 4
+#define QMI_REQ_SIZE 2048
+#define QMI_RESP_MAX 8
+#define QMI_RESP_SIZE 2048
+
+#define RX_REQ_MAX 8
+#define RX_REQ_SIZE 2048
+#define TX_REQ_MAX 8
+#define TX_REQ_SIZE 2048
+
+#define TXN_MAX 2048
+
+/* QMI request and response buffers */
+struct qmi_buf {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+/* Control & data SMD channel private data */
+struct rmnet_smd_info {
+ struct smd_channel *ch;
+ struct tasklet_struct tx_tlet;
+ struct tasklet_struct rx_tlet;
+#define CH_OPENED 0
+ unsigned long flags;
+ /* pending rx packet length */
+ atomic_t rx_pkt;
+ /* wait for smd open event */
+ wait_queue_head_t wait;
+};
+
+struct rmnet_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+
+ struct usb_ep *epout;
+ struct usb_ep *epin;
+ struct usb_ep *epnotify;
+ struct usb_request *notify_req;
+
+ u8 ifc_id;
+ /* QMI lists */
+ struct list_head qmi_req_pool;
+ struct list_head qmi_resp_pool;
+ struct list_head qmi_req_q;
+ struct list_head qmi_resp_q;
+ /* Tx/Rx lists */
+ struct list_head tx_idle;
+ struct list_head rx_idle;
+ struct list_head rx_queue;
+
+ spinlock_t lock;
+ atomic_t online;
+ atomic_t notify_count;
+
+ struct rmnet_smd_info smd_ctl;
+ struct rmnet_smd_info smd_data;
+
+ struct workqueue_struct *wq;
+ struct work_struct connect_work;
+ struct work_struct disconnect_work;
+
+ unsigned long dpkts_to_host;
+ unsigned long dpkts_from_modem;
+ unsigned long dpkts_from_host;
+ unsigned long dpkts_to_modem;
+
+ unsigned long cpkts_to_host;
+ unsigned long cpkts_from_modem;
+ unsigned long cpkts_from_host;
+ unsigned long cpkts_to_modem;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_out_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+ [0].s = "QMI RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+ &rmnet_string_table,
+ NULL,
+};
+
+static struct qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+ struct qmi_buf *qmi;
+
+ qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
+ if (qmi != NULL) {
+ qmi->buf = kmalloc(len, kmalloc_flags);
+ if (qmi->buf == NULL) {
+ kfree(qmi);
+ qmi = NULL;
+ }
+ }
+
+ return qmi ? qmi : ERR_PTR(-ENOMEM);
+}
+
+static void rmnet_free_qmi(struct qmi_buf *qmi)
+{
+ kfree(qmi->buf);
+ kfree(qmi);
+}
+
+/*
+ * Allocate a usb_request and its buffer. Returns a pointer to the
+ * usb_request or an error pointer (ERR_PTR) on failure.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+ if (req != NULL) {
+ req->length = len;
+ req->buf = kmalloc(len, kmalloc_flags);
+ if (req->buf == NULL) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+
+ return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
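+/* Completion handler for the interrupt (notify) endpoint. On success it
+ * keeps re-queuing the request while notify_count indicates further
+ * pending QMI_RESPONSE_AVAILABLE notifications; on disconnect the count
+ * is reset.
+ */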
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&dev->notify_count, 0);
+ break;
+ default:
+ ERROR(cdev, "rmnet notify ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+ if (ep != dev->epnotify)
+ break;
+
+ /* handle multiple pending QMI_RESPONSE_AVAILABLE
+ * notifications by resending until we're done
+ */
+ if (atomic_dec_and_test(&dev->notify_count))
+ break;
+
+ status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+ if (status) {
+ atomic_dec(&dev->notify_count);
+ ERROR(cdev, "rmnet notify ep enqueue error %d\n",
+ status);
+ }
+ break;
+ }
+}
+
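+/* Notify the host that a QMI response is available. Only the first
+ * notification is queued on the interrupt endpoint; additional ones are
+ * tracked in notify_count and resent from rmnet_notify_complete().
+ */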
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event = req->buf;
+ int status;
+
+ /* Response will be sent later */
+ if (atomic_inc_return(&dev->notify_count) != 1)
+ return;
+
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+
+ status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+ if (status < 0) {
+ atomic_dec(&dev->notify_count);
+ ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+ }
+}
+
+/* TODO
+ * handle modem restart events
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+ struct rmnet_smd_info *smd_info = priv;
+ int len = atomic_read(&smd_info->rx_pkt);
+ struct rmnet_dev *dev = (struct rmnet_dev *) smd_info->tx_tlet.data;
+
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ if (!atomic_read(&dev->online))
+ break;
+ if (len && (smd_write_avail(smd_info->ch) >= len))
+ tasklet_schedule(&smd_info->rx_tlet);
+
+ if (smd_read_avail(smd_info->ch))
+ tasklet_schedule(&smd_info->tx_tlet);
+
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ /* USB endpoints are not enabled until the SMD channels
+ * are opened. Wake up the worker thread to continue
+ * connection processing.
+ */
+ set_bit(CH_OPENED, &smd_info->flags);
+ wake_up(&smd_info->wait);
+ break;
+ case SMD_EVENT_CLOSE:
+ /* We should never get here; reset the flag
+ * after the SMD channel is closed.
+ */
+ clear_bit(CH_OPENED, &smd_info->flags);
+ break;
+ }
+}
+
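+/* Tasklet: drain complete QMI packets from the control SMD channel into
+ * buffers taken from qmi_resp_pool, queue them on qmi_resp_q and notify
+ * the host that a response is available.
+ */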
+static void rmnet_control_tx_tlet(unsigned long arg)
+{
+ struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct qmi_buf *qmi_resp;
+ int sz;
+ unsigned long flags;
+
+ while (1) {
+ sz = smd_cur_packet_size(dev->smd_ctl.ch);
+ if (sz == 0)
+ break;
+ if (smd_read_avail(dev->smd_ctl.ch) < sz)
+ break;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(&dev->qmi_resp_pool)) {
+ ERROR(cdev, "rmnet QMI Tx buffers full\n");
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ }
+ qmi_resp = list_first_entry(&dev->qmi_resp_pool,
+ struct qmi_buf, list);
+ list_del(&qmi_resp->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->cpkts_from_modem++;
+ list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ qmi_response_available(dev);
+ }
+
+}
+
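+/* Tasklet: write queued QMI requests (qmi_req_q) into the control SMD
+ * channel once it has enough write room, returning the buffers to
+ * qmi_req_pool afterwards.
+ */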
+static void rmnet_control_rx_tlet(unsigned long arg)
+{
+ struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct qmi_buf *qmi_req;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (1) {
+
+ if (list_empty(&dev->qmi_req_q)) {
+ atomic_set(&dev->smd_ctl.rx_pkt, 0);
+ break;
+ }
+ qmi_req = list_first_entry(&dev->qmi_req_q,
+ struct qmi_buf, list);
+ if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
+ atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
+ DBG(cdev, "rmnet control smd channel full\n");
+ break;
+ }
+
+ list_del(&qmi_req->list);
+ dev->cpkts_from_host++;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ret != qmi_req->len) {
+ ERROR(cdev, "rmnet control smd write failed\n");
+ break;
+ }
+ dev->cpkts_to_modem++;
+ list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
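+/* ep0 OUT completion for SEND_ENCAPSULATED_COMMAND: write the QMI request
+ * straight into the control SMD channel if it has room, otherwise copy it
+ * into a qmi_req_pool buffer and queue it for the control rx tasklet.
+ */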
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct qmi_buf *qmi_req;
+ int ret;
+
+ if (req->status < 0) {
+ ERROR(cdev, "rmnet command error %d\n", req->status);
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ dev->cpkts_from_host++;
+ /* no pending control rx packet */
+ if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
+ if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
+ atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
+ goto queue_req;
+ }
+ spin_unlock(&dev->lock);
+ ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
+ /* This should never happen */
+ if (ret != req->actual)
+ ERROR(cdev, "rmnet control smd write failed\n");
+ spin_lock(&dev->lock);
+ dev->cpkts_to_modem++;
+ spin_unlock(&dev->lock);
+ return;
+ }
+queue_req:
+ if (list_empty(&dev->qmi_req_pool)) {
+ spin_unlock(&dev->lock);
+ ERROR(cdev, "rmnet QMI pool is empty\n");
+ return;
+ }
+
+ qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
+ list_del(&qmi_req->list);
+ spin_unlock(&dev->lock);
+ memcpy(qmi_req->buf, req->buf, req->actual);
+ qmi_req->len = req->actual;
+ spin_lock(&dev->lock);
+ list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+ spin_unlock(&dev->lock);
+}
+static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+
+ spin_lock(&dev->lock);
+ dev->cpkts_to_host++;
+ spin_unlock(&dev->lock);
+}
+
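+/* Handle the class-specific control requests used for QMI transport:
+ * SEND_ENCAPSULATED_COMMAND, GET_ENCAPSULATED_RESPONSE and the
+ * SET_CONTROL_LINE_STATE (DTR) workaround described below.
+ */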
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int ret = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ struct qmi_buf *resp;
+ int schedule = 0;
+
+ if (!atomic_read(&dev->online))
+ return -ENOTCONN;
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ if (w_length > req->length)
+ goto invalid;
+ ret = w_length;
+ req->complete = rmnet_command_complete;
+ req->context = dev;
+ break;
+
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value)
+ goto invalid;
+ else {
+ spin_lock(&dev->lock);
+ if (list_empty(&dev->qmi_resp_q)) {
+ INFO(cdev, "qmi resp empty "
+ " req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ spin_unlock(&dev->lock);
+ goto invalid;
+ }
+ resp = list_first_entry(&dev->qmi_resp_q,
+ struct qmi_buf, list);
+ list_del(&resp->list);
+ spin_unlock(&dev->lock);
+ memcpy(req->buf, resp->buf, resp->len);
+ ret = resp->len;
+ spin_lock(&dev->lock);
+
+ if (list_empty(&dev->qmi_resp_pool))
+ schedule = 1;
+ list_add_tail(&resp->list, &dev->qmi_resp_pool);
+
+ if (schedule)
+ tasklet_schedule(&dev->smd_ctl.tx_tlet);
+ spin_unlock(&dev->lock);
+ req->complete = rmnet_txcommand_complete;
+ req->context = dev;
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ /* This is a workaround for RmNet and is borrowed from the
+ * CDC/ACM standard. The host driver will issue the above ACM
+ * standard request to the RmNet interface in the following
+ * scenario: Once the network adapter is disabled from device
+ * manager, the above request will be sent from the qcusbnet
+ * host driver, with DTR being '0'. Once network adapter is
+ * enabled from device manager (or during enumeration), the
+ * request will be sent with DTR being '1'.
+ */
+ if (w_value & ACM_CTRL_DTR)
+ ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
+ else
+ ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
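+/* Queue all idle bulk OUT requests so the host can send data packets. */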
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status;
+ struct usb_request *req;
+ struct list_head *pool = &dev->rx_idle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del(&req->list);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (status) {
+ ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+ list_add_tail(&req->list, pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
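+/* Tasklet: read complete data packets from the data SMD channel and
+ * queue them on the bulk IN endpoint towards the host.
+ */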
+static void rmnet_data_tx_tlet(unsigned long arg)
+{
+ struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int status;
+ int sz;
+ unsigned long flags;
+
+ while (1) {
+
+ sz = smd_cur_packet_size(dev->smd_data.ch);
+ if (sz == 0)
+ break;
+ if (smd_read_avail(dev->smd_data.ch) < sz)
+ break;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(&dev->tx_idle)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ DBG(cdev, "rmnet data Tx buffers full\n");
+ break;
+ }
+ req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ req->length = smd_read(dev->smd_data.ch, req->buf, sz);
+ status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+ if (status) {
+ ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, &dev->tx_idle);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ }
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->dpkts_from_modem++;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+
+}
+
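+/* Tasklet: flush host data packets queued on rx_queue into the data SMD
+ * channel once it has write room, then restart bulk OUT reception.
+ */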
+static void rmnet_data_rx_tlet(unsigned long arg)
+{
+ struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (1) {
+ if (list_empty(&dev->rx_queue)) {
+ atomic_set(&dev->smd_data.rx_pkt, 0);
+ break;
+ }
+ req = list_first_entry(&dev->rx_queue,
+ struct usb_request, list);
+ if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+ atomic_set(&dev->smd_data.rx_pkt, req->actual);
+ DBG(cdev, "rmnet SMD data channel full\n");
+ break;
+ }
+
+ list_del(&req->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ret != req->actual) {
+ ERROR(cdev, "rmnet SMD data write failed\n");
+ break;
+ }
+ dev->dpkts_to_modem++;
+ list_add_tail(&req->list, &dev->rx_idle);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* We have free rx data requests. */
+ rmnet_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue it on rx_queue.
+ * We do not write into SMD directly until rx_queue is empty,
+ * so that packet ordering is strictly preserved.
+ */
+static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+ int ret;
+
+ switch (status) {
+ case 0:
+ /* normal completion */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &dev->rx_idle);
+ spin_unlock(&dev->lock);
+ return;
+ default:
+ /* unexpected failure */
+ ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+ ep->name, status,
+ req->actual, req->length);
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &dev->rx_idle);
+ spin_unlock(&dev->lock);
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ dev->dpkts_from_host++;
+ if (!atomic_read(&dev->smd_data.rx_pkt)) {
+ if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+ atomic_set(&dev->smd_data.rx_pkt, req->actual);
+ goto queue_req;
+ }
+ spin_unlock(&dev->lock);
+ ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+ /* This should never happen */
+ if (ret != req->actual)
+ ERROR(cdev, "rmnet data smd write failed\n");
+ /* Restart Rx */
+ spin_lock(&dev->lock);
+ dev->dpkts_to_modem++;
+ list_add_tail(&req->list, &dev->rx_idle);
+ spin_unlock(&dev->lock);
+ rmnet_start_rx(dev);
+ return;
+ }
+queue_req:
+ list_add_tail(&req->list, &dev->rx_queue);
+ spin_unlock(&dev->lock);
+}
+
+static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+ int schedule = 0;
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &dev->tx_idle);
+ spin_unlock(&dev->lock);
+ break;
+ default:
+ ERROR(cdev, "rmnet data tx ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+ spin_lock(&dev->lock);
+ if (list_empty(&dev->tx_idle))
+ schedule = 1;
+ list_add_tail(&req->list, &dev->tx_idle);
+ dev->dpkts_to_host++;
+ if (schedule)
+ tasklet_schedule(&dev->smd_data.tx_tlet);
+ spin_unlock(&dev->lock);
+ break;
+ }
+
+}
+
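+/* Workqueue handler: kill the tasklets, close both SMD channels and move
+ * all in-flight buffers back to their idle pools.
+ */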
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+ struct qmi_buf *qmi;
+ struct usb_request *req;
+ struct list_head *act, *tmp;
+ struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+ disconnect_work);
+
+ tasklet_kill(&dev->smd_ctl.rx_tlet);
+ tasklet_kill(&dev->smd_ctl.tx_tlet);
+ tasklet_kill(&dev->smd_data.rx_tlet);
+ tasklet_kill(&dev->smd_data.tx_tlet);
+
+ smd_close(dev->smd_ctl.ch);
+ dev->smd_ctl.flags = 0;
+
+ smd_close(dev->smd_data.ch);
+ dev->smd_data.flags = 0;
+
+ atomic_set(&dev->notify_count, 0);
+
+ list_for_each_safe(act, tmp, &dev->rx_queue) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ list_add_tail(&req->list, &dev->rx_idle);
+ }
+
+ list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+ qmi = list_entry(act, struct qmi_buf, list);
+ list_del(&qmi->list);
+ list_add_tail(&qmi->list, &dev->qmi_req_pool);
+ }
+
+ list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+ qmi = list_entry(act, struct qmi_buf, list);
+ list_del(&qmi->list);
+ list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+ }
+
+}
+
+/* SMD close may sleep
+ * schedule a work to close smd channels
+ */
+static void rmnet_disable(struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+ if (!atomic_read(&dev->online))
+ return;
+
+ atomic_set(&dev->online, 0);
+
+ usb_ep_fifo_flush(dev->epnotify);
+ usb_ep_disable(dev->epnotify);
+ usb_ep_fifo_flush(dev->epout);
+ usb_ep_disable(dev->epout);
+
+ usb_ep_fifo_flush(dev->epin);
+ usb_ep_disable(dev->epin);
+
+ /* cleanup work */
+ queue_work(dev->wq, &dev->disconnect_work);
+}
+
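+/* Workqueue handler: open the control and data SMD channels (may sleep),
+ * wait for their open events and start bulk OUT reception.
+ */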
+static void rmnet_connect_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret = 0;
+
+ /* Control channel for QMI messages */
+ ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
+ &dev->smd_ctl, rmnet_smd_notify);
+ if (ret) {
+ ERROR(cdev, "Unable to open control smd channel\n");
+ return;
+ }
+ wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
+ &dev->smd_ctl.flags));
+
+ /* Data channel for network packets */
+ ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
+ &dev->smd_data, rmnet_smd_notify);
+ if (ret) {
+ ERROR(cdev, "Unable to open data smd channel\n");
+ smd_close(dev->smd_ctl.ch);
+ return;
+ }
+ wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
+ &dev->smd_data.flags));
+
+ atomic_set(&dev->online, 1);
+ /* Queue Rx data requests */
+ rmnet_start_rx(dev);
+}
+
+/* SMD open may sleep.
+ * Schedule a work to open smd channels and enable
+ * endpoints if smd channels are opened successfully.
+ */
+static int rmnet_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret = 0;
+
+ ret = usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+ &rmnet_hs_in_desc,
+ &rmnet_fs_in_desc));
+ if (ret) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ dev->epin->name, ret);
+ return ret;
+ }
+ ret = usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+ &rmnet_hs_out_desc,
+ &rmnet_fs_out_desc));
+ if (ret) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ dev->epout->name, ret);
+ usb_ep_disable(dev->epin);
+ return ret;
+ }
+
+ ret = usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+ &rmnet_hs_notify_desc,
+ &rmnet_fs_notify_desc));
+ if (ret) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ dev->epnotify->name, ret);
+ usb_ep_disable(dev->epin);
+ usb_ep_disable(dev->epout);
+ return ret;
+ }
+
+ queue_work(dev->wq, &dev->connect_work);
+ return 0;
+}
+
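+/* Reset the packet counters and free every request and QMI buffer
+ * allocated by rmnet_bind().
+ */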
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+ struct qmi_buf *qmi;
+ struct usb_request *req;
+ struct list_head *act, *tmp;
+
+ dev->dpkts_to_host = 0;
+ dev->dpkts_from_modem = 0;
+ dev->dpkts_from_host = 0;
+ dev->dpkts_to_modem = 0;
+
+ dev->cpkts_to_host = 0;
+ dev->cpkts_from_modem = 0;
+ dev->cpkts_from_host = 0;
+ dev->cpkts_to_modem = 0;
+ /* free all usb requests in tx pool */
+ list_for_each_safe(act, tmp, &dev->tx_idle) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epin, req);
+ }
+
+ /* free all usb requests in rx pool */
+ list_for_each_safe(act, tmp, &dev->rx_idle) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epout, req);
+ }
+
+ /* free all buffers in qmi request pool */
+ list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
+ qmi = list_entry(act, struct qmi_buf, list);
+ list_del(&qmi->list);
+ rmnet_free_qmi(qmi);
+ }
+
+ /* free all buffers in qmi response pool */
+ list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
+ qmi = list_entry(act, struct qmi_buf, list);
+ list_del(&qmi->list);
+ rmnet_free_qmi(qmi);
+ }
+
+ rmnet_free_req(dev->epnotify, dev->notify_req);
+}
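+
+/* Bind the function to a configuration: allocate the interface ID and
+ * endpoints, the notification request, the QMI buffer pools and the bulk
+ * IN/OUT request pools.
+ */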
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ int i, id, ret;
+ struct qmi_buf *qmi;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ dev->cdev = cdev;
+
+ /* allocate interface ID */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->ifc_id = id;
+ rmnet_interface_desc.bInterfaceNumber = id;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+ if (!ep)
+ return -ENODEV;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epin = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+ if (!ep)
+ return -ENODEV;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epout = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+ if (!ep)
+ return -ENODEV;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epnotify = ep;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ rmnet_hs_in_desc.bEndpointAddress =
+ rmnet_fs_in_desc.bEndpointAddress;
+ rmnet_hs_out_desc.bEndpointAddress =
+ rmnet_fs_out_desc.bEndpointAddress;
+ rmnet_hs_notify_desc.bEndpointAddress =
+ rmnet_fs_notify_desc.bEndpointAddress;
+
+ }
+
+ /* allocate notification */
+ dev->notify_req = rmnet_alloc_req(dev->epnotify, RMNET_MAX_NOTIFY_SIZE,
+ GFP_KERNEL);
+ if (IS_ERR(dev->notify_req))
+ return PTR_ERR(dev->notify_req);
+
+ dev->notify_req->complete = rmnet_notify_complete;
+ dev->notify_req->context = dev;
+ dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
+
+ /* Allocate the qmi request and response buffers */
+ for (i = 0; i < QMI_REQ_MAX; i++) {
+ qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
+ if (IS_ERR(qmi)) {
+ ret = PTR_ERR(qmi);
+ goto free_buf;
+ }
+ list_add_tail(&qmi->list, &dev->qmi_req_pool);
+ }
+
+ for (i = 0; i < QMI_RESP_MAX; i++) {
+ qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
+ if (IS_ERR(qmi)) {
+ ret = PTR_ERR(qmi);
+ goto free_buf;
+ }
+ list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+ }
+
+ /* Allocate bulk in/out requests for data transfer */
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto free_buf;
+ }
+ req->length = TXN_MAX;
+ req->context = dev;
+ req->complete = rmnet_complete_epout;
+ list_add_tail(&req->list, &dev->rx_idle);
+ }
+
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epin, TX_REQ_SIZE, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto free_buf;
+ }
+ req->context = dev;
+ req->complete = rmnet_complete_epin;
+ list_add_tail(&req->list, &dev->tx_idle);
+ }
+
+ return 0;
+
+free_buf:
+ rmnet_free_buf(dev);
+ dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+ return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_dev *dev = file->private_data;
+ struct rmnet_smd_info smd_ctl_info = dev->smd_ctl;
+ struct rmnet_smd_info smd_data_info = dev->smd_data;
+ char *buf;
+ unsigned long flags;
+ int ret;
+
+ buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = scnprintf(buf, 512,
+ "smd_control_ch_opened: %lu\n"
+ "smd_data_ch_opened: %lu\n"
+ "usb online : %d\n"
+ "dpkts_from_modem: %lu\n"
+ "dpkts_to_host: %lu\n"
+ "pending_dpkts_to_host: %lu\n"
+ "dpkts_from_host: %lu\n"
+ "dpkts_to_modem: %lu\n"
+ "pending_dpkts_to_modem: %lu\n"
+ "cpkts_from_modem: %lu\n"
+ "cpkts_to_host: %lu\n"
+ "pending_cpkts_to_host: %lu\n"
+ "cpkts_from_host: %lu\n"
+ "cpkts_to_modem: %lu\n"
+ "pending_cpkts_to_modem: %lu\n"
+ "smd_read_avail_ctrl: %d\n"
+ "smd_write_avail_ctrl: %d\n"
+ "smd_read_avail_data: %d\n"
+ "smd_write_avail_data: %d\n",
+ smd_ctl_info.flags, smd_data_info.flags,
+ atomic_read(&dev->online),
+ dev->dpkts_from_modem, dev->dpkts_to_host,
+ (dev->dpkts_from_modem - dev->dpkts_to_host),
+ dev->dpkts_from_host, dev->dpkts_to_modem,
+ (dev->dpkts_from_host - dev->dpkts_to_modem),
+ dev->cpkts_from_modem, dev->cpkts_to_host,
+ (dev->cpkts_from_modem - dev->cpkts_to_host),
+ dev->cpkts_from_host, dev->cpkts_to_modem,
+ (dev->cpkts_from_host - dev->cpkts_to_modem),
+ smd_read_avail(dev->smd_ctl.ch),
+ smd_write_avail(dev->smd_ctl.ch),
+ smd_read_avail(dev->smd_data.ch),
+ smd_write_avail(dev->smd_data.ch));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_dev *dev = file->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ dev->dpkts_to_host = 0;
+ dev->dpkts_from_modem = 0;
+ dev->dpkts_from_host = 0;
+ dev->dpkts_to_modem = 0;
+
+ dev->cpkts_to_host = 0;
+ dev->cpkts_from_modem = 0;
+ dev->cpkts_from_host = 0;
+ dev->cpkts_to_modem = 0;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static const struct file_operations rmnet_debug_stats_ops = {
+ .open = debug_open,
+ .read = debug_read_stats,
+ .write = debug_reset_stats,
+};
+
+static struct dentry *dent;
+static struct dentry *dent_status;
+
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+
+ dent = debugfs_create_dir("usb_rmnet", 0);
+ if (IS_ERR(dent))
+ return;
+
+ dent_status = debugfs_create_file("status", 0444, dent, dev,
+ &rmnet_debug_stats_ops);
+
+ if (!dent_status) {
+ debugfs_remove(dent);
+ dent = NULL;
+ return;
+ }
+
+ return;
+}
+#else
+static void usb_debugfs_init(struct rmnet_dev *dev) {}
+#endif
+
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+ tasklet_kill(&dev->smd_ctl.rx_tlet);
+ tasklet_kill(&dev->smd_ctl.tx_tlet);
+ tasklet_kill(&dev->smd_data.rx_tlet);
+ tasklet_kill(&dev->smd_data.tx_tlet);
+
+ flush_workqueue(dev->wq);
+ rmnet_free_buf(dev);
+ dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+
+ destroy_workqueue(dev->wq);
+ debugfs_remove_recursive(dent);
+ kfree(dev);
+
+}
+
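+/* Create the rmnet function instance with its workqueue, tasklets and
+ * buffer lists, add it to the configuration and register the debugfs
+ * status entries.
+ */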
+int rmnet_function_add(struct usb_configuration *c)
+{
+ struct rmnet_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->wq = create_singlethread_workqueue("k_rmnet_work");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto free_dev;
+ }
+
+ spin_lock_init(&dev->lock);
+ atomic_set(&dev->notify_count, 0);
+ atomic_set(&dev->online, 0);
+ atomic_set(&dev->smd_ctl.rx_pkt, 0);
+ atomic_set(&dev->smd_data.rx_pkt, 0);
+
+ INIT_WORK(&dev->connect_work, rmnet_connect_work);
+ INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+
+ tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
+ (unsigned long) dev);
+ tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
+ (unsigned long) dev);
+ tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
+ (unsigned long) dev);
+ tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
+ (unsigned long) dev);
+
+ init_waitqueue_head(&dev->smd_ctl.wait);
+ init_waitqueue_head(&dev->smd_data.wait);
+
+ INIT_LIST_HEAD(&dev->qmi_req_pool);
+ INIT_LIST_HEAD(&dev->qmi_req_q);
+ INIT_LIST_HEAD(&dev->qmi_resp_pool);
+ INIT_LIST_HEAD(&dev->qmi_resp_q);
+ INIT_LIST_HEAD(&dev->rx_idle);
+ INIT_LIST_HEAD(&dev->rx_queue);
+ INIT_LIST_HEAD(&dev->tx_idle);
+
+ dev->function.name = "rmnet";
+ dev->function.strings = rmnet_strings;
+ dev->function.descriptors = rmnet_fs_function;
+ dev->function.hs_descriptors = rmnet_hs_function;
+ dev->function.bind = rmnet_bind;
+ dev->function.unbind = rmnet_unbind;
+ dev->function.setup = rmnet_setup;
+ dev->function.set_alt = rmnet_set_alt;
+ dev->function.disable = rmnet_disable;
+
+ ret = usb_add_function(c, &dev->function);
+ if (ret)
+ goto free_wq;
+
+ usb_debugfs_init(dev);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(dev->wq);
+free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET
+static struct android_usb_function rmnet_function = {
+ .name = "rmnet",
+ .bind_config = rmnet_function_add,
+};
+
+static int __init init(void)
+{
+ android_register_function(&rmnet_function);
+ return 0;
+}
+module_init(init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET */
diff --git a/drivers/usb/gadget/f_rmnet_smd_sdio.c b/drivers/usb/gadget/f_rmnet_smd_sdio.c
new file mode 100644
index 0000000..e99716b
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_smd_sdio.c
@@ -0,0 +1,1995 @@
+/*
+ * f_rmnet_smd_sdio.c -- RmNet SMD & SDIO function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <asm/ioctls.h>
+
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/msm_smd.h>
+#include <mach/sdio_cmux.h>
+#include <mach/sdio_dmux.h>
+
+static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL;
+module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
+
+static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL;
+module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
+
+static char *rmnet_smd_data_ch = CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL;
+module_param(rmnet_smd_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_smd_data_ch, "RmNet data SMD channel");
+
+#define ACM_CTRL_DTR (1 << 0)
+
+#define SDIO_MUX_HDR 8
+#define RMNET_SDIO_NOTIFY_INTERVAL 5
+#define RMNET_SDIO_MAX_NFY_SZE sizeof(struct usb_cdc_notification)
+
+#define RMNET_SDIO_RX_REQ_MAX 16
+#define RMNET_SDIO_RX_REQ_SIZE 2048
+#define RMNET_SDIO_TX_REQ_MAX 100
+
+#define RMNET_SDIO_TX_PKT_DROP_THRESHOLD 1000
+#define RMNET_SDIO_RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000
+#define RMNET_SDIO_RX_PKT_FLOW_CTRL_DISABLE 500
+
+static uint32_t sdio_tx_pkt_drop_thld = RMNET_SDIO_TX_PKT_DROP_THRESHOLD;
+module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+static uint32_t sdio_rx_fctrl_en_thld =
+ RMNET_SDIO_RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static uint32_t sdio_rx_fctrl_dis_thld = RMNET_SDIO_RX_PKT_FLOW_CTRL_DISABLE;
+module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+
+#define RMNET_SMD_RX_REQ_MAX 8
+#define RMNET_SMD_RX_REQ_SIZE 2048
+#define RMNET_SMD_TX_REQ_MAX 8
+#define RMNET_SMD_TX_REQ_SIZE 2048
+#define RMNET_SMD_TXN_MAX 2048
+
+struct rmnet_ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+enum usb_rmnet_xport_type {
+ USB_RMNET_XPORT_UNDEFINED,
+ USB_RMNET_XPORT_SDIO,
+ USB_RMNET_XPORT_SMD,
+};
+
+struct rmnet_ctrl_dev {
+ struct list_head tx_q;
+ wait_queue_head_t tx_wait_q;
+ unsigned long tx_len;
+
+ struct list_head rx_q;
+ unsigned long rx_len;
+
+ unsigned long cbits_to_modem;
+
+ unsigned opened;
+};
+
+struct rmnet_sdio_dev {
+ /* Tx/Rx lists */
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_queue;
+
+ struct work_struct data_rx_work;
+
+ struct delayed_work open_work;
+ atomic_t sdio_open;
+
+ unsigned int dpkts_pending_atdmux;
+};
+
+/* Data SMD channel */
+struct rmnet_smd_info {
+ struct smd_channel *ch;
+ struct tasklet_struct tx_tlet;
+ struct tasklet_struct rx_tlet;
+#define CH_OPENED 0
+ unsigned long flags;
+ /* pending rx packet length */
+ atomic_t rx_pkt;
+ /* wait for smd open event */
+ wait_queue_head_t wait;
+};
+
+struct rmnet_smd_dev {
+ /* Tx/Rx lists */
+ struct list_head tx_idle;
+ struct list_head rx_idle;
+ struct list_head rx_queue;
+
+ struct rmnet_smd_info smd_data;
+};
+
+struct rmnet_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+
+ struct usb_ep *epout;
+ struct usb_ep *epin;
+ struct usb_ep *epnotify;
+ struct usb_request *notify_req;
+
+ struct rmnet_smd_dev smd_dev;
+ struct rmnet_sdio_dev sdio_dev;
+ struct rmnet_ctrl_dev ctrl_dev;
+
+ u8 ifc_id;
+ enum usb_rmnet_xport_type xport;
+ spinlock_t lock;
+ atomic_t online;
+ atomic_t notify_count;
+ struct workqueue_struct *wq;
+ struct work_struct disconnect_work;
+
+ /* pkt counters */
+ unsigned long dpkts_tomsm;
+ unsigned long dpkts_tomdm;
+ unsigned long dpkts_tolaptop;
+ unsigned long tx_drp_cnt;
+ unsigned long cpkts_tolaptop;
+ unsigned long cpkts_tomdm;
+ unsigned long cpkts_drp_cnt;
+};
+
+static struct rmnet_dev *_dev;
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+ .bInterval = 1 << RMNET_SDIO_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+ .bInterval = RMNET_SDIO_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_out_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+ [0].s = "RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+ &rmnet_string_table,
+ NULL,
+};
+
+static char *xport_to_str(enum usb_rmnet_xport_type t)
+{
+ switch (t) {
+ case USB_RMNET_XPORT_SDIO:
+ return "SDIO";
+ case USB_RMNET_XPORT_SMD:
+ return "SMD";
+ default:
+ return "UNDEFINED";
+ }
+}
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *cpkt;
+
+ cpkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!cpkt)
+ return NULL;
+
+ cpkt->buf = kzalloc(len, flags);
+ if (!cpkt->buf) {
+ kfree(cpkt);
+ return NULL;
+ }
+
+ cpkt->len = len;
+
+ return cpkt;
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *cpkt)
+{
+ kfree(cpkt->buf);
+ kfree(cpkt);
+}
+
+/*
+ * Allocate a usb_request and its buffer. Returns a pointer to the
+ * usb_request or an error pointer (ERR_PTR) on failure.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+ if (len && req != NULL) {
+ req->length = len;
+ req->buf = kmalloc(len, kmalloc_flags);
+ if (req->buf == NULL) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+
+ return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static int rmnet_sdio_rx_submit(struct rmnet_dev *dev, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct sk_buff *skb;
+ int retval;
+
+ skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
+ if (skb == NULL)
+ return -ENOMEM;
+ skb_reserve(skb, SDIO_MUX_HDR);
+
+ req->buf = skb->data;
+ req->length = RMNET_SDIO_RX_REQ_SIZE;
+ req->context = skb;
+
+ retval = usb_ep_queue(dev->epout, req, gfp_flags);
+ if (retval)
+ dev_kfree_skb_any(skb);
+
+ return retval;
+}
+
+static void rmnet_sdio_start_rx(struct rmnet_dev *dev)
+{
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status;
+ struct usb_request *req;
+ struct list_head *pool;
+ unsigned long flags;
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: USB not connected\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ pool = &sdio_dev->rx_idle;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ status = rmnet_sdio_rx_submit(dev, req, GFP_KERNEL);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (status) {
+ ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+ list_add_tail(&req->list, &sdio_dev->rx_idle);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
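+/* Pair queued downlink skbs from the SDIO mux with idle bulk IN requests
+ * and queue them towards the host.
+ */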
+static void rmnet_sdio_start_tx(struct rmnet_dev *dev)
+{
+ unsigned long flags;
+ int status;
+ struct sk_buff *skb;
+ struct usb_request *req;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+
+ if (!atomic_read(&dev->online))
+ return;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&sdio_dev->tx_idle)) {
+ skb = __skb_dequeue(&sdio_dev->tx_skb_queue);
+ if (!skb) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ req = list_first_entry(&sdio_dev->tx_idle,
+ struct usb_request, list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+
+ list_del(&req->list);
+ spin_unlock(&dev->lock);
+ status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+ spin_lock(&dev->lock);
+ if (status) {
+ /* USB still online, queue requests back */
+ if (atomic_read(&dev->online)) {
+ ERROR(cdev, "rmnet tx data enqueue err %d\n",
+ status);
+ list_add_tail(&req->list, &sdio_dev->tx_idle);
+ __skb_queue_head(&sdio_dev->tx_skb_queue, skb);
+ } else {
+ req->buf = 0;
+ rmnet_free_req(dev->epin, req);
+ dev_kfree_skb_any(skb);
+ }
+ break;
+ }
+ dev->dpkts_tolaptop++;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
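+/* SDIO mux downlink callback: queue the skb for transmission to the
+ * host, dropping it if the tx backlog exceeds sdio_tx_pkt_drop_thld.
+ */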
+static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb)
+{
+ struct rmnet_dev *dev = priv;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+ if (!atomic_read(&dev->online)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ spin_lock_irqsave(&dev->lock, flags);
+ if (sdio_dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) {
+ pr_err_ratelimited("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
+ __func__, dev->tx_drp_cnt);
+ dev->tx_drp_cnt++;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ __skb_queue_tail(&sdio_dev->tx_skb_queue, skb);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ rmnet_sdio_start_tx(dev);
+}
+
+static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb)
+{
+ struct rmnet_dev *dev = priv;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+
+ if (!skb)
+ return;
+
+ dev_kfree_skb_any(skb);
+ /* This callback is invoked by the SDIO mux with a spinlock
+ * already held (via spin_lock_irqsave), so plain spin_lock()
+ * is sufficient here.
+ */
+ spin_lock(&dev->lock);
+ sdio_dev->dpkts_pending_atdmux--;
+
+ if (sdio_dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) {
+ spin_unlock(&dev->lock);
+ return;
+ }
+ spin_unlock(&dev->lock);
+
+ rmnet_sdio_start_rx(dev);
+}
+
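+/* Workqueue handler: push uplink skbs queued by the bulk OUT completion
+ * into the SDIO data mux, tracking how many are pending at the dmux.
+ */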
+static void rmnet_sdio_data_rx_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+ sdio_dev.data_rx_work);
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ struct sk_buff *skb;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue))) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ret < 0) {
+ ERROR(cdev, "rmnet SDIO data write failed\n");
+ dev_kfree_skb_any(skb);
+ } else {
+ dev->dpkts_tomdm++;
+ sdio_dev->dpkts_pending_atdmux++;
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
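+/* Bulk OUT completion (SDIO transport): hand the received skb to the rx
+ * work and resubmit the request unless SDIO flow control is active.
+ */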
+static void
+rmnet_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = ep->driver_data;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+ dev_kfree_skb_any(skb);
+ req->buf = 0;
+ rmnet_free_req(ep, req);
+ return;
+ }
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ dev_kfree_skb_any(skb);
+ req->buf = 0;
+ rmnet_free_req(ep, req);
+ return;
+ default:
+ /* unexpected failure */
+ ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+ ep->name, status,
+ req->actual, req->length);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ spin_lock(&dev->lock);
+ if (queue) {
+ __skb_queue_tail(&sdio_dev->rx_skb_queue, skb);
+ queue_work(dev->wq, &sdio_dev->data_rx_work);
+ }
+
+ if (sdio_dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) {
+ list_add_tail(&req->list, &sdio_dev->rx_idle);
+ spin_unlock(&dev->lock);
+ return;
+ }
+ spin_unlock(&dev->lock);
+
+ status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
+ if (status) {
+ ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+ list_add_tail(&req->list, &sdio_dev->rx_idle);
+ }
+}
+
+static void
+rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = ep->driver_data;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct sk_buff *skb = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+
+ if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+ dev_kfree_skb_any(skb);
+ req->buf = 0;
+ rmnet_free_req(ep, req);
+ return;
+ }
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ break;
+ default:
+ ERROR(cdev, "rmnet data tx ep error %d\n", status);
+ break;
+ }
+
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &sdio_dev->tx_idle);
+ spin_unlock(&dev->lock);
+ dev_kfree_skb_any(skb);
+
+ rmnet_sdio_start_tx(dev);
+}
+
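+/* Allocate the SDIO-transport bulk IN/OUT requests and start reception. */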
+static int rmnet_sdio_enable(struct rmnet_dev *dev)
+{
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ int i;
+ struct usb_request *req;
+
+ /*
+ * If the memory allocation fails, all the allocated
+ * requests will be freed upon cable disconnect.
+ */
+ for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epout, 0, GFP_KERNEL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->complete = rmnet_sdio_complete_epout;
+ list_add_tail(&req->list, &sdio_dev->rx_idle);
+ }
+ for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epin, 0, GFP_KERNEL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->complete = rmnet_sdio_complete_epin;
+ list_add_tail(&req->list, &sdio_dev->tx_idle);
+ }
+
+ rmnet_sdio_start_rx(dev);
+ return 0;
+}
+
+static void rmnet_smd_start_rx(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ int status;
+ struct usb_request *req;
+ struct list_head *pool = &smd_dev->rx_idle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del(&req->list);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (status) {
+ ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+ list_add_tail(&req->list, pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_smd_data_tx_tlet(unsigned long arg)
+{
+ struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int status;
+ int sz;
+ unsigned long flags;
+
+ while (1) {
+ if (!atomic_read(&dev->online))
+ break;
+ sz = smd_cur_packet_size(smd_dev->smd_data.ch);
+ if (sz == 0)
+ break;
+ if (smd_read_avail(smd_dev->smd_data.ch) < sz)
+ break;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(&smd_dev->tx_idle)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ DBG(cdev, "rmnet data Tx buffers full\n");
+ break;
+ }
+ req = list_first_entry(&smd_dev->tx_idle,
+ struct usb_request, list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ req->length = smd_read(smd_dev->smd_data.ch, req->buf, sz);
+ status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+ if (status) {
+ ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, &smd_dev->tx_idle);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ }
+ dev->dpkts_tolaptop++;
+ }
+
+}
+
+static void rmnet_smd_data_rx_tlet(unsigned long arg)
+{
+ struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (1) {
+ if (!atomic_read(&dev->online))
+ break;
+ if (list_empty(&smd_dev->rx_queue)) {
+ atomic_set(&smd_dev->smd_data.rx_pkt, 0);
+ break;
+ }
+ req = list_first_entry(&smd_dev->rx_queue,
+ struct usb_request, list);
+ if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
+ atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
+ DBG(cdev, "rmnet SMD data channel full\n");
+ break;
+ }
+
+ list_del(&req->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ret != req->actual) {
+ ERROR(cdev, "rmnet SMD data write failed\n");
+ break;
+ }
+ dev->dpkts_tomsm++;
+ list_add_tail(&req->list, &smd_dev->rx_idle);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* We have free rx data requests. */
+ rmnet_smd_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue it on rx_queue.
+ * We do not write into SMD directly until rx_queue is empty,
+ * so that packet ordering is strictly preserved.
+ */
+static void
+rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+ int ret;
+
+ if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+ rmnet_free_req(ep, req);
+ return;
+ }
+
+ switch (status) {
+ case 0:
+ /* normal completion */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &smd_dev->rx_idle);
+ spin_unlock(&dev->lock);
+ return;
+ default:
+ /* unexpected failure */
+ ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+ ep->name, status,
+ req->actual, req->length);
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &smd_dev->rx_idle);
+ spin_unlock(&dev->lock);
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ if (!atomic_read(&smd_dev->smd_data.rx_pkt)) {
+ if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
+ atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
+ goto queue_req;
+ }
+ spin_unlock(&dev->lock);
+ ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
+ /* This should never happen */
+ if (ret != req->actual)
+ ERROR(cdev, "rmnet data smd write failed\n");
+ /* Restart Rx */
+ dev->dpkts_tomsm++;
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &smd_dev->rx_idle);
+ spin_unlock(&dev->lock);
+ rmnet_smd_start_rx(dev);
+ return;
+ }
+queue_req:
+ list_add_tail(&req->list, &smd_dev->rx_queue);
+ spin_unlock(&dev->lock);
+}
+
+static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+ int schedule = 0;
+
+ if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+ rmnet_free_req(ep, req);
+ return;
+ }
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ spin_lock(&dev->lock);
+ list_add_tail(&req->list, &smd_dev->tx_idle);
+ spin_unlock(&dev->lock);
+ break;
+ default:
+ ERROR(cdev, "rmnet data tx ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+ spin_lock(&dev->lock);
+ if (list_empty(&smd_dev->tx_idle))
+ schedule = 1;
+ list_add_tail(&req->list, &smd_dev->tx_idle);
+
+ if (schedule)
+ tasklet_schedule(&smd_dev->smd_data.tx_tlet);
+ spin_unlock(&dev->lock);
+ break;
+ }
+
+}
+
+
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+ struct rmnet_dev *dev = priv;
+ struct rmnet_smd_info *smd_info = &dev->smd_dev.smd_data;
+ int len = atomic_read(&smd_info->rx_pkt);
+
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ if (!atomic_read(&dev->online))
+ break;
+ if (len && (smd_write_avail(smd_info->ch) >= len))
+ tasklet_schedule(&smd_info->rx_tlet);
+
+ if (smd_read_avail(smd_info->ch))
+ tasklet_schedule(&smd_info->tx_tlet);
+
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ /* USB endpoints are not enabled until the SMD channels
+ * are opened. Wake up the worker thread to continue
+ * connection processing.
+ */
+ set_bit(CH_OPENED, &smd_info->flags);
+ wake_up(&smd_info->wait);
+ break;
+ case SMD_EVENT_CLOSE:
+ /* We should never get here; reset the flag
+ * after the SMD channel is closed.
+ */
+ clear_bit(CH_OPENED, &smd_info->flags);
+ break;
+ }
+}
+
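+/* Open the data SMD channel if it is not already open, allocate the
+ * SMD-transport bulk requests and start reception.
+ */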
+static int rmnet_smd_enable(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ int i, ret;
+ struct usb_request *req;
+
+ if (test_bit(CH_OPENED, &smd_dev->smd_data.flags))
+ goto smd_alloc_req;
+
+ ret = smd_open(rmnet_smd_data_ch, &smd_dev->smd_data.ch,
+ dev, rmnet_smd_notify);
+ if (ret) {
+ ERROR(cdev, "Unable to open data smd channel\n");
+ return ret;
+ }
+
+ wait_event(smd_dev->smd_data.wait, test_bit(CH_OPENED,
+ &smd_dev->smd_data.flags));
+
+ /* Allocate bulk in/out requests for data transfer.
+ * If the memory allocation fails, all the allocated
+ * requests will be freed upon cable disconnect.
+ */
+smd_alloc_req:
+ for (i = 0; i < RMNET_SMD_RX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epout, RMNET_SMD_RX_REQ_SIZE,
+ GFP_KERNEL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->length = RMNET_SMD_TXN_MAX;
+ req->context = dev;
+ req->complete = rmnet_smd_complete_epout;
+ list_add_tail(&req->list, &smd_dev->rx_idle);
+ }
+
+ for (i = 0; i < RMNET_SMD_TX_REQ_MAX; i++) {
+ req = rmnet_alloc_req(dev->epin, RMNET_SMD_TX_REQ_SIZE,
+ GFP_KERNEL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->context = dev;
+ req->complete = rmnet_smd_complete_epin;
+ list_add_tail(&req->list, &smd_dev->tx_idle);
+ }
+
+ rmnet_smd_start_rx(dev);
+ return 0;
+}
+
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int status = req->status;
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&dev->notify_count, 0);
+ break;
+ default:
+ ERROR(cdev, "rmnet notifyep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+
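+ /* notify_count tracks responses queued by
+ * ctrl_response_available(); keep the interrupt request armed
+ * until every pending response has been signalled to the host.
+ */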
+ if (atomic_dec_and_test(&dev->notify_count))
+ break;
+
+ status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+ if (status) {
+ atomic_dec(&dev->notify_count);
+ ERROR(cdev, "rmnet notify ep enq error %d\n", status);
+ }
+ break;
+ }
+}
+
+static void ctrl_response_available(struct rmnet_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event = req->buf;
+ int status;
+
+ /* A notification is already in flight; rmnet_notify_complete()
+ * will re-queue it, so this response will be signalled later.
+ */
+ if (atomic_inc_return(&dev->notify_count) != 1)
+ return;
+
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+
+ status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+ if (status < 0) {
+ atomic_dec(&dev->notify_count);
+ ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+ }
+}
+
+#define MAX_CTRL_PKT_SIZE 4096
+
+static void rmnet_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ switch (req->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case 0:
+ return;
+ default:
+ INFO(cdev, "rmnet %s response error %d, %d/%d\n",
+ ep->name, req->status,
+ req->actual, req->length);
+ }
+}
+
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct rmnet_dev *dev = req->context;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ int len = req->actual;
+
+ if (req->status < 0) {
+ ERROR(cdev, "rmnet command error %d\n", req->status);
+ return;
+ }
+
+ cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+ if (!cpkt) {
+ ERROR(cdev, "unable to allocate memory for ctrl req\n");
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ if (!ctrl_dev->opened) {
+ spin_unlock(&dev->lock);
+ kfree(cpkt);
+ dev->cpkts_drp_cnt++;
+ pr_err_ratelimited(
+ "%s: ctrl pkts dropped: cpkts_drp_cnt: %lu\n",
+ __func__, dev->cpkts_drp_cnt);
+ return;
+ }
+
+ memcpy(cpkt->buf, req->buf, len);
+
+ list_add_tail(&cpkt->list, &ctrl_dev->tx_q);
+ ctrl_dev->tx_len++;
+ spin_unlock(&dev->lock);
+
+ /* wakeup read thread */
+ wake_up(&ctrl_dev->tx_wait_q);
+}
+
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int ret = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ struct rmnet_ctrl_pkt *cpkt;
+
+ if (!atomic_read(&dev->online))
+ return -ENOTCONN;
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ if (w_length > req->length)
+ goto invalid;
+ ret = w_length;
+ req->complete = rmnet_command_complete;
+ req->context = dev;
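+ /* The QMI message arrives in the ep0 OUT data stage and is
+ * handed to rmnet_command_complete().
+ */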
+ break;
+
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value)
+ goto invalid;
+ else {
+ unsigned len;
+
+ spin_lock(&dev->lock);
+ if (list_empty(&ctrl_dev->rx_q)) {
+ DBG(cdev, "ctrl resp queue empty"
+ " %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ spin_unlock(&dev->lock);
+ goto invalid;
+
+ }
+ cpkt = list_first_entry(&ctrl_dev->rx_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ ctrl_dev->rx_len--;
+ spin_unlock(&dev->lock);
+
+ len = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, len);
+ ret = len;
+ req->complete = rmnet_response_complete;
+ req->context = dev;
+ rmnet_free_ctrl_pkt(cpkt);
+
+ dev->cpkts_tolaptop++;
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ /* This is a workaround for RmNet, borrowed from the CDC/ACM
+ * standard. The host driver issues this ACM request to the
+ * RmNet interface in the following scenario: when the network
+ * adapter is disabled from Device Manager, the qcusbnet host
+ * driver sends the request with DTR set to '0'; when the
+ * adapter is enabled again (or during enumeration), the
+ * request is sent with DTR set to '1'.
+ */
+ if (w_value & ACM_CTRL_DTR)
+ ctrl_dev->cbits_to_modem |= TIOCM_DTR;
+ else
+ ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
+
+ ret = 0;
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (ret < w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ struct usb_request *req;
+ struct list_head *pool;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ /* free all usb requests in SDIO tx pool */
+ pool = &sdio_dev->tx_idle;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ req->buf = NULL;
+ rmnet_free_req(dev->epout, req);
+ }
+
+ pool = &sdio_dev->rx_idle;
+ /* free all usb requests in SDIO rx pool */
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ req->buf = NULL;
+ rmnet_free_req(dev->epin, req);
+ }
+
+ while ((skb = __skb_dequeue(&sdio_dev->tx_skb_queue)))
+ dev_kfree_skb_any(skb);
+
+ while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue)))
+ dev_kfree_skb_any(skb);
+
+ /* free all usb requests in SMD tx pool */
+ pool = &smd_dev->tx_idle;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epout, req);
+ }
+
+ pool = &smd_dev->rx_idle;
+ /* free all usb requests in SMD rx pool */
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epin, req);
+ }
+
+ /* free all usb requests in SMD rx queue */
+ pool = &smd_dev->rx_queue;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epin, req);
+ }
+
+ pool = &ctrl_dev->tx_q;
+ while (!list_empty(pool)) {
+ cpkt = list_first_entry(pool, struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ ctrl_dev->tx_len--;
+ }
+
+ pool = &ctrl_dev->rx_q;
+ while (!list_empty(pool)) {
+ cpkt = list_first_entry(pool, struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ ctrl_dev->rx_len--;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+ disconnect_work);
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+ if (dev->xport == USB_RMNET_XPORT_SMD) {
+ tasklet_kill(&smd_dev->smd_data.rx_tlet);
+ tasklet_kill(&smd_dev->smd_data.tx_tlet);
+ }
+
+ rmnet_free_buf(dev);
+ dev->xport = 0;
+
+ /* wakeup read thread */
+ wake_up(&ctrl_dev->tx_wait_q);
+}
+
+static void rmnet_suspend(struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+ if (!atomic_read(&dev->online))
+ return;
+ /* This is a workaround for a Windows host bug during suspend.
+ * Windows XP/7 hosts are supposed to drop DTR when the device is
+ * suspended, but they do not, so drop DTR explicitly from the
+ * function driver's suspend callback.
+ */
+ ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
+}
+
+static void rmnet_disable(struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+ if (!atomic_read(&dev->online))
+ return;
+
+ atomic_set(&dev->online, 0);
+
+ usb_ep_fifo_flush(dev->epnotify);
+ usb_ep_disable(dev->epnotify);
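+ /* notify_req is allocated afresh in rmnet_set_alt(), so release it here */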
+ rmnet_free_req(dev->epnotify, dev->notify_req);
+
+ usb_ep_fifo_flush(dev->epout);
+ usb_ep_disable(dev->epout);
+
+ usb_ep_fifo_flush(dev->epin);
+ usb_ep_disable(dev->epin);
+
+ /* cleanup work */
+ ctrl_dev->cbits_to_modem = 0;
+ queue_work(dev->wq, &dev->disconnect_work);
+}
+
+#define SDIO_OPEN_RETRY_DELAY msecs_to_jiffies(2000)
+#define SDIO_OPEN_MAX_RETRY 90
+static void rmnet_open_sdio_work(struct work_struct *w)
+{
+ struct rmnet_dev *dev =
+ container_of(w, struct rmnet_dev, sdio_dev.open_work.work);
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret;
+ static int retry_cnt;
+
+ /* Data channel for network packets */
+ ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
+ rmnet_sdio_data_receive_cb,
+ rmnet_sdio_data_write_done);
+ if (ret) {
+ if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
+ ERROR(cdev, "Unable to open SDIO DATA channel\n");
+ return;
+ }
+ retry_cnt++;
+ queue_delayed_work(dev->wq, &sdio_dev->open_work,
+ SDIO_OPEN_RETRY_DELAY);
+ return;
+ }
+
+
+ atomic_set(&sdio_dev->sdio_open, 1);
+ pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
+ __func__, retry_cnt);
+ retry_cnt = 0;
+ return;
+}
+
+static int rmnet_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ /* allocate notification */
+ dev->notify_req = rmnet_alloc_req(dev->epnotify,
+ RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
+
+ if (IS_ERR(dev->notify_req))
+ return PTR_ERR(dev->notify_req);
+
+ dev->notify_req->complete = rmnet_notify_complete;
+ dev->notify_req->context = dev;
+ dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
+ usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+ &rmnet_hs_notify_desc,
+ &rmnet_fs_notify_desc));
+
+ dev->epin->driver_data = dev;
+ usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+ &rmnet_hs_in_desc,
+ &rmnet_fs_in_desc));
+ dev->epout->driver_data = dev;
+ usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+ &rmnet_hs_out_desc,
+ &rmnet_fs_out_desc));
+
+ dev->dpkts_tolaptop = 0;
+ dev->cpkts_tolaptop = 0;
+ dev->cpkts_tomdm = 0;
+ dev->dpkts_tomdm = 0;
+ dev->dpkts_tomsm = 0;
+ dev->tx_drp_cnt = 0;
+ dev->cpkts_drp_cnt = 0;
+ sdio_dev->dpkts_pending_atdmux = 0;
+ atomic_set(&dev->online, 1);
+
+ return 0;
+}
+
+static ssize_t transport_store(
+ struct device *device, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct usb_function *f = dev_get_drvdata(device);
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ int value;
+ enum usb_rmnet_xport_type given_xport;
+ enum usb_rmnet_xport_type t;
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct list_head *pool;
+ struct sk_buff_head *skb_pool;
+ struct sk_buff *skb;
+ struct usb_request *req;
+ unsigned long flags;
+
+ if (!atomic_read(&dev->online)) {
+ pr_err("%s: usb cable is not connected\n", __func__);
+ return -EINVAL;
+ }
+
+ sscanf(buf, "%d", &value);
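+ /* a non-zero value selects the SDIO transport, zero selects SMD */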
+ if (value)
+ given_xport = USB_RMNET_XPORT_SDIO;
+ else
+ given_xport = USB_RMNET_XPORT_SMD;
+
+ if (given_xport == dev->xport) {
+ pr_err("%s: given_xport:%s cur_xport:%s doing nothing\n",
+ __func__, xport_to_str(given_xport),
+ xport_to_str(dev->xport));
+ return 0;
+ }
+
+ pr_debug("usb_rmnet: TransportRequested: %s\n",
+ xport_to_str(given_xport));
+
+ /* prevent any other pkts to/from usb */
+ t = dev->xport;
+ dev->xport = USB_RMNET_XPORT_UNDEFINED;
+ if (t != USB_RMNET_XPORT_UNDEFINED) {
+ usb_ep_fifo_flush(dev->epin);
+ usb_ep_fifo_flush(dev->epout);
+ }
+
+ switch (t) {
+ case USB_RMNET_XPORT_SDIO:
+ spin_lock_irqsave(&dev->lock, flags);
+ /* tx_idle */
+
+ sdio_dev->dpkts_pending_atdmux = 0;
+
+ pool = &sdio_dev->tx_idle;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ req->buf = NULL;
+ rmnet_free_req(dev->epout, req);
+ }
+
+ /* rx_idle */
+ pool = &sdio_dev->rx_idle;
+ /* free all usb requests in SDIO rx pool */
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ req->buf = NULL;
+ rmnet_free_req(dev->epin, req);
+ }
+
+ /* tx_skb_queue */
+ skb_pool = &sdio_dev->tx_skb_queue;
+ while ((skb = __skb_dequeue(skb_pool)))
+ dev_kfree_skb_any(skb);
+ /* rx_skb_queue */
+ skb_pool = &sdio_dev->rx_skb_queue;
+ while ((skb = __skb_dequeue(skb_pool)))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ case USB_RMNET_XPORT_SMD:
+ /* close smd xport */
+ tasklet_kill(&smd_dev->smd_data.rx_tlet);
+ tasklet_kill(&smd_dev->smd_data.tx_tlet);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ /* free all usb requests in SMD tx pool */
+ pool = &smd_dev->tx_idle;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epout, req);
+ }
+
+ pool = &smd_dev->rx_idle;
+ /* free all usb requests in SMD rx pool */
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epin, req);
+ }
+
+ /* free all usb requests in SMD rx queue */
+ pool = &smd_dev->rx_queue;
+ while (!list_empty(pool)) {
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del(&req->list);
+ rmnet_free_req(dev->epin, req);
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ default:
+ pr_debug("%s: undefined xport, do nothing\n", __func__);
+ }
+
+ dev->xport = given_xport;
+
+ switch (dev->xport) {
+ case USB_RMNET_XPORT_SDIO:
+ rmnet_sdio_enable(dev);
+ break;
+ case USB_RMNET_XPORT_SMD:
+ rmnet_smd_enable(dev);
+ break;
+ default:
+ /* we should never come here */
+ pr_err("%s: undefined transport\n", __func__);
+ }
+
+ return size;
+}
+static DEVICE_ATTR(transport, S_IRUGO | S_IWUSR, NULL, transport_store);
+
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ int id, ret;
+ struct usb_ep *ep;
+
+ dev->cdev = cdev;
+
+ /* allocate interface ID */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->ifc_id = id;
+ rmnet_interface_desc.bInterfaceNumber = id;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+ if (!ep)
+ goto out;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epin = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+ if (!ep)
+ goto out;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epout = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+ if (!ep)
+ goto out;
+ ep->driver_data = cdev; /* claim endpoint */
+ dev->epnotify = ep;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ rmnet_hs_in_desc.bEndpointAddress =
+ rmnet_fs_in_desc.bEndpointAddress;
+ rmnet_hs_out_desc.bEndpointAddress =
+ rmnet_fs_out_desc.bEndpointAddress;
+ rmnet_hs_notify_desc.bEndpointAddress =
+ rmnet_fs_notify_desc.bEndpointAddress;
+ }
+
+ ret = device_create_file(f->dev, &dev_attr_transport);
+ if (ret)
+ goto out;
+
+ queue_delayed_work(dev->wq, &sdio_dev->open_work, 0);
+
+ return 0;
+
+out:
+ if (dev->epnotify)
+ dev->epnotify->driver_data = NULL;
+ if (dev->epout)
+ dev->epout->driver_data = NULL;
+ if (dev->epin)
+ dev->epin->driver_data = NULL;
+
+ return -ENODEV;
+}
+
+static void rmnet_smd_init(struct rmnet_smd_dev *smd_dev)
+{
+ struct rmnet_dev *dev = container_of(smd_dev,
+ struct rmnet_dev, smd_dev);
+
+ atomic_set(&smd_dev->smd_data.rx_pkt, 0);
+ tasklet_init(&smd_dev->smd_data.rx_tlet, rmnet_smd_data_rx_tlet,
+ (unsigned long) dev);
+ tasklet_init(&smd_dev->smd_data.tx_tlet, rmnet_smd_data_tx_tlet,
+ (unsigned long) dev);
+
+ init_waitqueue_head(&smd_dev->smd_data.wait);
+
+ INIT_LIST_HEAD(&smd_dev->rx_idle);
+ INIT_LIST_HEAD(&smd_dev->rx_queue);
+ INIT_LIST_HEAD(&smd_dev->tx_idle);
+}
+
+static void rmnet_sdio_init(struct rmnet_sdio_dev *sdio_dev)
+{
+ INIT_WORK(&sdio_dev->data_rx_work, rmnet_sdio_data_rx_work);
+
+ INIT_DELAYED_WORK(&sdio_dev->open_work, rmnet_open_sdio_work);
+
+ INIT_LIST_HEAD(&sdio_dev->rx_idle);
+ INIT_LIST_HEAD(&sdio_dev->tx_idle);
+ skb_queue_head_init(&sdio_dev->tx_skb_queue);
+ skb_queue_head_init(&sdio_dev->rx_skb_queue);
+}
+
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+ struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+
+ smd_close(smd_dev->smd_data.ch);
+ smd_dev->smd_data.flags = 0;
+
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 1024
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_dev *dev = file->private_data;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ char *debug_buf;
+ unsigned long flags;
+ int ret;
+
+ debug_buf = kmalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!debug_buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = scnprintf(debug_buf, DEBUG_BUF_SIZE,
+ "dpkts_tomsm: %lu\n"
+ "dpkts_tomdm: %lu\n"
+ "cpkts_tomdm: %lu\n"
+ "dpkts_tolaptop: %lu\n"
+ "cpkts_tolaptop: %lu\n"
+ "cbits_to_modem: %lu\n"
+ "tx skb size: %u\n"
+ "rx_skb_size: %u\n"
+ "dpkts_pending_at_dmux: %u\n"
+ "tx drp cnt: %lu\n"
+ "cpkts_drp_cnt: %lu\n"
+ "cpkt_tx_qlen: %lu\n"
+ "cpkt_rx_qlen_to_modem: %lu\n"
+ "xport: %s\n"
+ "ctr_ch_opened: %d\n",
+ dev->dpkts_tomsm, dev->dpkts_tomdm,
+ dev->cpkts_tomdm, dev->dpkts_tolaptop,
+ dev->cpkts_tolaptop, ctrl_dev->cbits_to_modem,
+ sdio_dev->tx_skb_queue.qlen,
+ sdio_dev->rx_skb_queue.qlen,
+ sdio_dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
+ dev->cpkts_drp_cnt,
+ ctrl_dev->tx_len, ctrl_dev->rx_len,
+ xport_to_str(dev->xport), ctrl_dev->opened);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, debug_buf, ret);
+
+ kfree(debug_buf);
+
+ return ret;
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_dev *dev = file->private_data;
+ struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+
+ dev->dpkts_tolaptop = 0;
+ dev->cpkts_tolaptop = 0;
+ dev->cpkts_tomdm = 0;
+ dev->dpkts_tomdm = 0;
+ dev->dpkts_tomsm = 0;
+ sdio_dev->dpkts_pending_atdmux = 0;
+ dev->tx_drp_cnt = 0;
+ dev->cpkts_drp_cnt = 0;
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+const struct file_operations rmnet_svlte_debug_stats_ops = {
+ .open = debug_open,
+ .read = debug_read_stats,
+ .write = debug_reset_stats,
+};
+
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("usb_rmnet", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("status", 0444, dent, dev,
+ &rmnet_svlte_debug_stats_ops);
+}
+#else
+static void usb_debugfs_init(struct rmnet_dev *dev) {}
+#endif
+
+int usb_rmnet_ctrl_open(struct inode *inode, struct file *fp)
+{
+ struct rmnet_dev *dev = _dev;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ctrl_dev->opened) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_err("%s: device is already opened\n", __func__);
+ return -EBUSY;
+ }
+
+ ctrl_dev->opened = 1;
+ fp->private_data = dev;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+
+int usb_rmnet_ctrl_release(struct inode *inode, struct file *fp)
+{
+ struct rmnet_dev *dev = fp->private_data;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ctrl_dev->opened = 0;
+ fp->private_data = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+ssize_t usb_rmnet_ctrl_read(struct file *fp,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct rmnet_dev *dev = fp->private_data;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ unsigned long flags;
+ int ret = 0;
+
+ctrl_read:
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(&ctrl_dev->tx_q)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ /* Implement sleep and wakeup here */
+ ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
+ !list_empty(&ctrl_dev->tx_q) ||
+ !atomic_read(&dev->online));
+ if (ret < 0)
+ return ret;
+
+ goto ctrl_read;
+ }
+
+ cpkt = list_first_entry(&ctrl_dev->tx_q, struct rmnet_ctrl_pkt, list);
+ if (cpkt->len > count) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_err("%s: cpkt size:%d > buf size:%d\n",
+ __func__, cpkt->len, count);
+ return -ENOMEM;
+ }
+ list_del(&cpkt->list);
+ ctrl_dev->tx_len--;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ count = cpkt->len;
+
+ ret = copy_to_user(buf, cpkt->buf, count);
+ dev->cpkts_tomdm++;
+
+ rmnet_free_ctrl_pkt(cpkt);
+
+ if (ret)
+ return -EFAULT;
+
+ return count;
+}
+
+ssize_t usb_rmnet_ctrl_write(struct file *fp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct rmnet_dev *dev = fp->private_data;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!count) {
+ pr_err("%s: zero length ctrl pkt\n", __func__);
+ return -ENODEV;
+ }
+
+ if (count > MAX_CTRL_PKT_SIZE) {
+ pr_err("%s: max_pkt_size:%d given_pkt_size:%d\n",
+ __func__, MAX_CTRL_PKT_SIZE, count);
+ return -ENOMEM;
+ }
+
+ cpkt = rmnet_alloc_ctrl_pkt(count, GFP_KERNEL);
+ if (!cpkt) {
+ pr_err("%s: cannot allocate rmnet ctrl pkt\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(cpkt->buf, buf, count);
+ if (ret) {
+ pr_err("%s: copy_from_user failed err:%d\n",
+ __func__, ret);
+ rmnet_free_ctrl_pkt(cpkt);
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ctrl_dev->rx_len++;
+ list_add(&cpkt->list, &ctrl_dev->rx_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ctrl_response_available(dev);
+
+ return count;
+}
+
+
+#define RMNET_CTRL_GET_DTR _IOR(0xFE, 0, int)
+static long
+usb_rmnet_ctrl_ioctl(struct file *fp, unsigned c, unsigned long value)
+{
+ struct rmnet_dev *dev = fp->private_data;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+ unsigned long *temp = (unsigned long *)value;
+ int ret = 0;
+
+ if (c != RMNET_CTRL_GET_DTR)
+ return -ENODEV;
+
+ ret = copy_to_user(temp,
+ &ctrl_dev->cbits_to_modem,
+ sizeof(*temp));
+ if (ret)
+ return -EFAULT;
+
+ return 0;
+}
+
+static const struct file_operations rmnet_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = usb_rmnet_ctrl_open,
+ .release = usb_rmnet_ctrl_release,
+ .read = usb_rmnet_ctrl_read,
+ .write = usb_rmnet_ctrl_write,
+ .unlocked_ioctl = usb_rmnet_ctrl_ioctl,
+};
+
+static struct miscdevice rmnet_ctrl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "rmnet_ctrl",
+ .fops = &rmnet_ctrl_fops,
+};
+
+static int rmnet_ctrl_device_init(struct rmnet_dev *dev)
+{
+ int ret;
+ struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+ INIT_LIST_HEAD(&ctrl_dev->tx_q);
+ INIT_LIST_HEAD(&ctrl_dev->rx_q);
+ init_waitqueue_head(&ctrl_dev->tx_wait_q);
+
+ ret = misc_register(&rmnet_ctrl_dev);
+ if (ret) {
+ pr_err("%s: failed to register misc device\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmnet_function_add(struct usb_configuration *c)
+{
+ struct rmnet_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ _dev = dev;
+
+ dev->wq = create_singlethread_workqueue("k_rmnet_work");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto free_dev;
+ }
+
+ spin_lock_init(&dev->lock);
+ atomic_set(&dev->notify_count, 0);
+ atomic_set(&dev->online, 0);
+ INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+ rmnet_smd_init(&dev->smd_dev);
+ rmnet_sdio_init(&dev->sdio_dev);
+
+ ret = rmnet_ctrl_device_init(dev);
+ if (ret) {
+ pr_debug("%s: rmnet_ctrl_device_init failed, err:%d\n",
+ __func__, ret);
+ goto free_wq;
+ }
+
+ dev->function.name = "rmnet_smd_sdio";
+ dev->function.strings = rmnet_strings;
+ dev->function.descriptors = rmnet_fs_function;
+ dev->function.hs_descriptors = rmnet_hs_function;
+ dev->function.bind = rmnet_bind;
+ dev->function.unbind = rmnet_unbind;
+ dev->function.setup = rmnet_setup;
+ dev->function.set_alt = rmnet_set_alt;
+ dev->function.disable = rmnet_disable;
+ dev->function.suspend = rmnet_suspend;
+
+ ret = usb_add_function(c, &dev->function);
+ if (ret)
+ goto free_wq;
+
+ usb_debugfs_init(dev);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(dev->wq);
+free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET_SMD_SDIO
+static struct android_usb_function rmnet_function = {
+ .name = "rmnet_smd_sdio",
+ .bind_config = rmnet_function_add,
+};
+
+static int __init rmnet_init(void)
+{
+ android_register_function(&rmnet_function);
+ return 0;
+}
+module_init(rmnet_init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET_SMD_SDIO */
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 490b00b..0c31544 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/usb/android_composite.h>
+#include <mach/usb_gadget_fserial.h>
#include "u_serial.h"
#include "gadget_chips.h"
@@ -30,6 +32,9 @@
struct gser_descs {
struct usb_endpoint_descriptor *in;
struct usb_endpoint_descriptor *out;
+#ifdef CONFIG_MODEM_SUPPORT
+ struct usb_endpoint_descriptor *notify;
+#endif
};
struct f_gser {
@@ -39,29 +44,129 @@
struct gser_descs fs;
struct gser_descs hs;
+ u8 online;
+ enum transport_type transport;
+
+#ifdef CONFIG_MODEM_SUPPORT
+ u8 pending;
+ spinlock_t lock;
+ struct usb_ep *notify;
+ struct usb_endpoint_descriptor *notify_desc;
+ struct usb_request *notify_req;
+
+ struct usb_cdc_line_coding port_line_coding;
+
+ /* SetControlLineState request */
+ u16 port_handshake_bits;
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+
+ /* SerialState notification */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
+#endif
};
+#ifdef CONFIG_USB_F_SERIAL
+static unsigned int no_tty_ports;
+static unsigned int no_sdio_ports;
+static unsigned int no_smd_ports;
+static unsigned int nr_ports;
+#endif
+
+static struct port_info {
+ enum transport_type transport;
+ unsigned port_num;
+ unsigned client_port_num;
+} gserial_ports[GSERIAL_NO_PORTS];
+
+static inline bool is_transport_sdio(enum transport_type t)
+{
+ return t == USB_GADGET_FSERIAL_TRANSPORT_SDIO;
+}
+
static inline struct f_gser *func_to_gser(struct usb_function *f)
{
return container_of(f, struct f_gser, port.func);
}
+#ifdef CONFIG_MODEM_SUPPORT
+static inline struct f_gser *port_to_gser(struct gserial *p)
+{
+ return container_of(p, struct f_gser, port);
+}
+#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
+#endif
/*-------------------------------------------------------------------------*/
/* interface descriptor: */
-static struct usb_interface_descriptor gser_interface_desc __initdata = {
+static struct usb_interface_descriptor gser_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
+#ifdef CONFIG_MODEM_SUPPORT
+ .bNumEndpoints = 3,
+#else
.bNumEndpoints = 2,
+#endif
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_cdc_header_desc gser_header_desc = {
+ .bLength = sizeof(gser_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+ .bcdCDC = __constant_cpu_to_le16(0x0110),
+};
+static struct usb_cdc_call_mgmt_descriptor
+gser_call_mgmt_descriptor = {
+ .bLength = sizeof(gser_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+ .bmCapabilities = 0,
+ /* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor gser_descriptor = {
+ .bLength = sizeof(gser_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+ .bmCapabilities = USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc gser_union_desc = {
+ .bLength = sizeof(gser_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+#endif
/* full speed support: */
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_endpoint_descriptor gser_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+#endif
static struct usb_endpoint_descriptor gser_fs_in_desc __initdata = {
.bLength = USB_DT_ENDPOINT_SIZE,
@@ -79,29 +184,53 @@
static struct usb_descriptor_header *gser_fs_function[] __initdata = {
(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_fs_notify_desc,
+#endif
(struct usb_descriptor_header *) &gser_fs_in_desc,
(struct usb_descriptor_header *) &gser_fs_out_desc,
NULL,
};
/* high speed support: */
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_endpoint_descriptor gser_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
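+ /* high-speed bInterval is an exponent in 125 us microframes:
+ * 2^(9-1) = 256 microframes = 32 ms, matching the full-speed
+ * polling interval.
+ */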
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+#endif
static struct usb_endpoint_descriptor gser_hs_in_desc __initdata = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor gser_hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor gser_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
};
static struct usb_descriptor_header *gser_hs_function[] __initdata = {
(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_hs_notify_desc,
+#endif
(struct usb_descriptor_header *) &gser_hs_in_desc,
(struct usb_descriptor_header *) &gser_hs_out_desc,
NULL,
@@ -124,27 +253,232 @@
NULL,
};
+static char *transport_to_str(enum transport_type t)
+{
+ switch (t) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ return "TTY";
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ return "SDIO";
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ return "SMD";
+ }
+
+ return "NONE";
+}
+
+#ifdef CONFIG_USB_F_SERIAL
+static int gport_setup(struct usb_configuration *c)
+{
+ int ret = 0;
+
+ pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n",
+ __func__, no_tty_ports, no_sdio_ports, nr_ports);
+
+ if (no_tty_ports)
+ ret = gserial_setup(c->cdev->gadget, no_tty_ports);
+ if (no_sdio_ports)
+ ret = gsdio_setup(c->cdev->gadget, no_sdio_ports);
+ if (no_smd_ports)
+ ret = gsmd_setup(c->cdev->gadget, no_smd_ports);
+
+ return ret;
+}
+#endif
+static int gport_connect(struct f_gser *gser)
+{
+ unsigned port_num;
+
+ pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n",
+ __func__, transport_to_str(gser->transport),
+ gser, &gser->port, gser->port_num);
+
+ port_num = gserial_ports[gser->port_num].client_port_num;
+
+ switch (gser->transport) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ gserial_connect(&gser->port, port_num);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ gsdio_connect(&gser->port, port_num);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ gsmd_connect(&gser->port, port_num);
+ break;
+ default:
+ pr_err("%s: Un-supported transport: %s\n", __func__,
+ transport_to_str(gser->transport));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int gport_disconnect(struct f_gser *gser)
+{
+ unsigned port_num;
+
+ pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n",
+ __func__, transport_to_str(gser->transport),
+ gser, &gser->port, gser->port_num);
+
+ port_num = gserial_ports[gser->port_num].client_port_num;
+
+ switch (gser->transport) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ gserial_disconnect(&gser->port);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ gsdio_disconnect(&gser->port, port_num);
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ gsmd_disconnect(&gser->port, port_num);
+ break;
+ default:
+ pr_err("%s: Un-supported transport:%s\n", __func__,
+ transport_to_str(gser->transport));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MODEM_SUPPORT
+static void gser_complete_set_line_coding(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gser *gser = ep->driver_data;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ if (req->status != 0) {
+ DBG(cdev, "gser ttyGS%d completion, err %d\n",
+ gser->port_num, req->status);
+ return;
+ }
+
+ /* normal completion */
+ if (req->actual != sizeof(gser->port_line_coding)) {
+ DBG(cdev, "gser ttyGS%d short resp, len %d\n",
+ gser->port_num, req->actual);
+ usb_ep_set_halt(ep);
+ } else {
+ struct usb_cdc_line_coding *value = req->buf;
+ gser->port_line_coding = *value;
+ }
+}
/*-------------------------------------------------------------------------*/
+static int
+gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gser *gser = func_to_gser(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* SET_LINE_CODING ... just read and save what the host sends */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_LINE_CODING:
+ if (w_length != sizeof(struct usb_cdc_line_coding))
+ goto invalid;
+
+ value = w_length;
+ cdev->gadget->ep0->driver_data = gser;
+ req->complete = gser_complete_set_line_coding;
+ break;
+
+ /* GET_LINE_CODING ... return what host sent, or initial value */
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_GET_LINE_CODING:
+ value = min_t(unsigned, w_length,
+ sizeof(struct usb_cdc_line_coding));
+ memcpy(req->buf, &gser->port_line_coding, value);
+ break;
+
+ /* SET_CONTROL_LINE_STATE ... save what the host sent */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+ value = 0;
+ gser->port_handshake_bits = w_value;
+ if (gser->port.notify_modem) {
+ unsigned port_num =
+ gserial_ports[gser->port_num].client_port_num;
+
+ gser->port.notify_modem(&gser->port,
+ port_num, w_value);
+ }
+ break;
+
+ default:
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+ gser->port_num, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "gser response on ttyGS%d, err %d\n",
+ gser->port_num, value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+#endif
static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gser *gser = func_to_gser(f);
struct usb_composite_dev *cdev = f->config->cdev;
+ int rc = 0;
/* we know alt == 0, so this is an activation or a reset */
- if (gser->port.in->driver_data) {
- DBG(cdev, "reset generic ttyGS%d\n", gser->port_num);
- gserial_disconnect(&gser->port);
- } else {
- DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- gser->port.in_desc = ep_choose(cdev->gadget,
- gser->hs.in, gser->fs.in);
- gser->port.out_desc = ep_choose(cdev->gadget,
- gser->hs.out, gser->fs.out);
+#ifdef CONFIG_MODEM_SUPPORT
+ if (gser->notify->driver_data) {
+ DBG(cdev, "reset generic ctl ttyGS%d\n", gser->port_num);
+ usb_ep_disable(gser->notify);
}
- gserial_connect(&gser->port, gser->port_num);
- return 0;
+ gser->notify_desc = ep_choose(cdev->gadget,
+ gser->hs.notify,
+ gser->fs.notify);
+ rc = usb_ep_enable(gser->notify, gser->notify_desc);
+ if (rc) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ gser->notify->name, rc);
+ return rc;
+ }
+ gser->notify->driver_data = gser;
+#endif
+
+ if (gser->port.in->driver_data) {
+ DBG(cdev, "reset generic data ttyGS%d\n", gser->port_num);
+ gport_disconnect(gser);
+ } else {
+ DBG(cdev, "activate generic data ttyGS%d\n", gser->port_num);
+ }
+ gser->port.in_desc = ep_choose(cdev->gadget,
+ gser->hs.in, gser->fs.in);
+ gser->port.out_desc = ep_choose(cdev->gadget,
+ gser->hs.out, gser->fs.out);
+
+ gport_connect(gser);
+
+ gser->online = 1;
+ return rc;
}
static void gser_disable(struct usb_function *f)
@@ -153,9 +487,180 @@
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "generic ttyGS%d deactivated\n", gser->port_num);
- gserial_disconnect(&gser->port);
+
+ gport_disconnect(gser);
+
+#ifdef CONFIG_MODEM_SUPPORT
+ usb_ep_fifo_flush(gser->notify);
+ usb_ep_disable(gser->notify);
+#endif
+ gser->online = 0;
+}
+#ifdef CONFIG_MODEM_SUPPORT
+static int gser_notify(struct f_gser *gser, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = gser->notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ req = gser->notify_req;
+ gser->notify_req = NULL;
+ gser->pending = false;
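+ /* a NULL notify_req marks a notification in flight; it is handed
+ * back in gser_notify_complete()
+ */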
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(gser->data_id);
+ notify->wLength = cpu_to_le16(length);
+ memcpy(buf, data, length);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
+ gser->port_num, status);
+ gser->notify_req = req;
+ }
+
+ return status;
}
+static int gser_notify_serial_state(struct f_gser *gser)
+{
+ int status;
+ unsigned long flags;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ spin_lock_irqsave(&gser->lock, flags);
+ if (gser->notify_req) {
+ DBG(cdev, "gser ttyGS%d serial state %04x\n",
+ gser->port_num, gser->serial_state);
+ status = gser_notify(gser, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &gser->serial_state,
+ sizeof(gser->serial_state));
+ } else {
+ gser->pending = true;
+ status = 0;
+ }
+ spin_unlock_irqrestore(&gser->lock, flags);
+ return status;
+}
+
+static void gser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gser *gser = req->context;
+ u8 doit = false;
+ unsigned long flags;
+
+ /* on this call path we do NOT hold the port spinlock,
+ * which is why ACM needs its own spinlock
+ */
+ spin_lock_irqsave(&gser->lock, flags);
+ if (req->status != -ESHUTDOWN)
+ doit = gser->pending;
+ gser->notify_req = req;
+ spin_unlock_irqrestore(&gser->lock, flags);
+
+ if (doit && gser->online)
+ gser_notify_serial_state(gser);
+}
+static void gser_connect(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ gser_notify_serial_state(gser);
+}
+
+unsigned int gser_get_dtr(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ if (gser->port_handshake_bits & ACM_CTRL_DTR)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int gser_get_rts(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ if (gser->port_handshake_bits & ACM_CTRL_RTS)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int gser_send_carrier_detect(struct gserial *port, unsigned int yes)
+{
+ struct f_gser *gser = port_to_gser(port);
+ u16 state;
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_DCD;
+ if (yes)
+ state |= ACM_CTRL_DCD;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+
+}
+
+unsigned int gser_send_ring_indicator(struct gserial *port, unsigned int yes)
+{
+ struct f_gser *gser = port_to_gser(port);
+ u16 state;
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_RI;
+ if (yes)
+ state |= ACM_CTRL_RI;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+
+}
+static void gser_disconnect(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ gser_notify_serial_state(gser);
+}
+
+static int gser_send_break(struct gserial *port, int duration)
+{
+ struct f_gser *gser = port_to_gser(port);
+ u16 state;
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+static int gser_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state = ctrl_bits;
+
+ return gser_notify_serial_state(gser);
+}
+#endif
/*-------------------------------------------------------------------------*/
/* serial function driver setup/binding */
@@ -190,6 +695,23 @@
gser->port.out = ep;
ep->driver_data = cdev; /* claim */
+#ifdef CONFIG_MODEM_SUPPORT
+ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ gser->notify = ep;
+ ep->driver_data = cdev; /* claim */
+ /* allocate notification */
+ gser->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!gser->notify_req)
+ goto fail;
+
+ gser->notify_req->complete = gser_notify_complete;
+ gser->notify_req->context = gser;
+#endif
+
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(gser_fs_function);
@@ -197,6 +719,10 @@
f->descriptors, &gser_fs_in_desc);
gser->fs.out = usb_find_endpoint(gser_fs_function,
f->descriptors, &gser_fs_out_desc);
+#ifdef CONFIG_MODEM_SUPPORT
+ gser->fs.notify = usb_find_endpoint(gser_fs_function,
+ f->descriptors, &gser_fs_notify_desc);
+#endif
/* support all relevant hardware speeds... we expect that when
@@ -208,6 +734,10 @@
gser_fs_in_desc.bEndpointAddress;
gser_hs_out_desc.bEndpointAddress =
gser_fs_out_desc.bEndpointAddress;
+#ifdef CONFIG_MODEM_SUPPORT
+ gser_hs_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+#endif
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(gser_hs_function);
@@ -216,6 +746,10 @@
f->hs_descriptors, &gser_hs_in_desc);
gser->hs.out = usb_find_endpoint(gser_hs_function,
f->hs_descriptors, &gser_hs_out_desc);
+#ifdef CONFIG_MODEM_SUPPORT
+ gser->hs.notify = usb_find_endpoint(gser_hs_function,
+ f->hs_descriptors, &gser_hs_notify_desc);
+#endif
}
DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
@@ -225,6 +759,14 @@
return 0;
fail:
+#ifdef CONFIG_MODEM_SUPPORT
+ if (gser->notify_req)
+ gs_free_req(gser->notify, gser->notify_req);
+
+ /* we might as well release our claims on endpoints */
+ if (gser->notify)
+ gser->notify->driver_data = NULL;
+#endif
/* we might as well release our claims on endpoints */
if (gser->port.out)
gser->port.out->driver_data = NULL;
@@ -239,9 +781,15 @@
static void
gser_unbind(struct usb_configuration *c, struct usb_function *f)
{
+#ifdef CONFIG_MODEM_SUPPORT
+ struct f_gser *gser = func_to_gser(f);
+#endif
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
+#ifdef CONFIG_MODEM_SUPPORT
+ gs_free_req(gser->notify, gser->notify_req);
+#endif
kfree(func_to_gser(f));
}
@@ -279,6 +827,9 @@
if (!gser)
return -ENOMEM;
+#ifdef CONFIG_MODEM_SUPPORT
+ spin_lock_init(&gser->lock);
+#endif
gser->port_num = port_num;
gser->port.func.name = "gser";
@@ -287,9 +838,130 @@
gser->port.func.unbind = gser_unbind;
gser->port.func.set_alt = gser_set_alt;
gser->port.func.disable = gser_disable;
+ gser->transport = gserial_ports[port_num].transport;
+#ifdef CONFIG_MODEM_SUPPORT
+ /* We support only two ports for now */
+ if (port_num == 0)
+ gser->port.func.name = "modem";
+ else
+ gser->port.func.name = "nmea";
+ gser->port.func.setup = gser_setup;
+ gser->port.connect = gser_connect;
+ gser->port.get_dtr = gser_get_dtr;
+ gser->port.get_rts = gser_get_rts;
+ gser->port.send_carrier_detect = gser_send_carrier_detect;
+ gser->port.send_ring_indicator = gser_send_ring_indicator;
+ gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
+ gser->port.disconnect = gser_disconnect;
+ gser->port.send_break = gser_send_break;
+#endif
status = usb_add_function(c, &gser->port.func);
if (status)
kfree(gser);
return status;
}
+
+#ifdef CONFIG_USB_F_SERIAL
+
+int fserial_nmea_bind_config(struct usb_configuration *c)
+{
+ return gser_bind_config(c, 1);
+}
+
+static struct android_usb_function nmea_function = {
+ .name = "nmea",
+ .bind_config = fserial_nmea_bind_config,
+};
+
+int fserial_modem_bind_config(struct usb_configuration *c)
+{
+ int ret;
+
+ /* Let the composite driver allocate the serial ports.
+ * For now, two ports are allocated: modem and nmea.
+ */
+ ret = gport_setup(c);
+
+ if (ret)
+ return ret;
+ return gser_bind_config(c, 0);
+}
+
+static struct android_usb_function modem_function = {
+ .name = "modem",
+ .bind_config = fserial_modem_bind_config,
+};
+
+static int fserial_remove(struct platform_device *dev)
+{
+ gserial_cleanup();
+
+ return 0;
+}
+
+static struct platform_driver usb_fserial = {
+ .remove = fserial_remove,
+ .driver = {
+ .name = "usb_fserial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init fserial_probe(struct platform_device *pdev)
+{
+ struct usb_gadget_fserial_platform_data *pdata =
+ pdev->dev.platform_data;
+ int i;
+
+ dev_dbg(&pdev->dev, "%s: probe\n", __func__);
+
+ if (!pdata)
+ goto probe_android_register;
+
+ for (i = 0; i < GSERIAL_NO_PORTS; i++) {
+ gserial_ports[i].transport = pdata->transport[i];
+ gserial_ports[i].port_num = i;
+
+ switch (gserial_ports[i].transport) {
+ case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+ gserial_ports[i].client_port_num = no_tty_ports;
+ no_tty_ports++;
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+ gserial_ports[i].client_port_num = no_sdio_ports;
+ no_sdio_ports++;
+ break;
+ case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+ gserial_ports[i].client_port_num = no_smd_ports;
+ no_smd_ports++;
+ break;
+ default:
+ pr_err("%s: Un-supported transport transport: %u\n",
+ __func__, gserial_ports[i].transport);
+ return -ENODEV;
+ }
+
+ nr_ports++;
+ }
+
+ pr_info("%s:gport:tty_ports:%u sdio_ports:%u "
+ "smd_ports:%u nr_ports:%u\n",
+ __func__, no_tty_ports, no_sdio_ports,
+ no_smd_ports, nr_ports);
+
+probe_android_register:
+ android_register_function(&modem_function);
+ android_register_function(&nmea_function);
+
+ return 0;
+}
+
+static int __init fserial_init(void)
+{
+ return platform_driver_probe(&usb_fserial, fserial_probe);
+}
+module_init(fserial_init);
+
+#endif /* CONFIG_USB_F_SERIAL */
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcdac7c..05692bb 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -120,6 +120,12 @@
#define gadget_is_ci13xxx_pci(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_MSM_72K
+#define gadget_is_msm72k(g) !strcmp("msm72k_udc", (g)->name)
+#else
+#define gadget_is_msm72k(g) 0
+#endif
+
// CONFIG_USB_GADGET_SX2
// CONFIG_USB_GADGET_AU1X00
// ...
@@ -223,6 +229,8 @@
return 0x29;
else if (gadget_is_s3c_hsudc(gadget))
return 0x30;
+ else if (gadget_is_msm72k(gadget))
+ return 0x31;
return -ENOENT;
}
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
new file mode 100644
index 0000000..24ba619
--- /dev/null
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -0,0 +1,2653 @@
+/*
+ * Driver for HighSpeed USB Client Controller in MSM7K
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/switch.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/msm72k_otg.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/board.h>
+#include <mach/msm_hsusb.h>
+#include <linux/device.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/clk.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+
+static const char driver_name[] = "msm72k_udc";
+
+/* #define DEBUG */
+/* #define VERBOSE */
+
+#define MSM_USB_BASE ((unsigned) ui->addr)
+
+#define DRIVER_DESC "MSM 72K USB Peripheral Controller"
+#define DRIVER_NAME "MSM72K_UDC"
+
+#define EPT_FLAG_IN 0x0001
+
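+/* a USB control SETUP packet is always 8 bytes */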
+#define SETUP_BUF_SIZE 8
+
+
+static const char *const ep_name[] = {
+ "ep0out", "ep1out", "ep2out", "ep3out",
+ "ep4out", "ep5out", "ep6out", "ep7out",
+ "ep8out", "ep9out", "ep10out", "ep11out",
+ "ep12out", "ep13out", "ep14out", "ep15out",
+ "ep0in", "ep1in", "ep2in", "ep3in",
+ "ep4in", "ep5in", "ep6in", "ep7in",
+ "ep8in", "ep9in", "ep10in", "ep11in",
+ "ep12in", "ep13in", "ep14in", "ep15in"
+};
+
+/* To release the wakelock from debugfs */
+static int release_wlocks;
+
+struct msm_request {
+ struct usb_request req;
+
+ /* saved copy of req.complete */
+ void (*gadget_complete)(struct usb_ep *ep,
+ struct usb_request *req);
+
+
+ struct usb_info *ui;
+ struct msm_request *next;
+ struct msm_request *prev;
+
+ unsigned busy:1;
+ unsigned live:1;
+ unsigned alloced:1;
+
+ dma_addr_t dma;
+ dma_addr_t item_dma;
+
+ struct ept_queue_item *item;
+};
+
+#define to_msm_request(r) container_of(r, struct msm_request, req)
+#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep)
+#define to_msm_otg(xceiv) container_of(xceiv, struct msm_otg, otg)
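+/* OTGSC B-Session Valid: VBUS is present from an attached host */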
+#define is_b_sess_vld() ((OTGSC_BSV & readl(USB_OTGSC)) ? 1 : 0)
+#define is_usb_online(ui) (ui->usb_state != USB_STATE_NOTATTACHED)
+
+struct msm_endpoint {
+ struct usb_ep ep;
+ struct usb_info *ui;
+ struct msm_request *req; /* head of pending requests */
+ struct msm_request *last;
+ unsigned flags;
+
+ /* bit number (0-31) in various status registers
+ ** as well as the index into the usb_info's array
+ ** of all endpoints
+ */
+ unsigned char bit;
+ unsigned char num;
+
+ unsigned wedged:1;
+ /* pointers to DMA transfer list area */
+ /* these are allocated from the usb_info dma space */
+ struct ept_queue_head *head;
+};
+
+/* PHY status check timer to monitor phy stuck up on reset */
+static struct timer_list phy_status_timer;
+
+static void usb_do_work(struct work_struct *w);
+static void usb_do_remote_wakeup(struct work_struct *w);
+
+
+#define USB_STATE_IDLE 0
+#define USB_STATE_ONLINE 1
+#define USB_STATE_OFFLINE 2
+
+#define USB_FLAG_START 0x0001
+#define USB_FLAG_VBUS_ONLINE 0x0002
+#define USB_FLAG_VBUS_OFFLINE 0x0004
+#define USB_FLAG_RESET 0x0008
+#define USB_FLAG_SUSPEND 0x0010
+#define USB_FLAG_CONFIGURED 0x0020
+
+#define USB_CHG_DET_DELAY msecs_to_jiffies(1000)
+#define REMOTE_WAKEUP_DELAY msecs_to_jiffies(1000)
+#define PHY_STATUS_CHECK_DELAY (jiffies + msecs_to_jiffies(1000))
+
+struct usb_info {
+ /* lock for register/queue/device state changes */
+ spinlock_t lock;
+
+ /* single request used for handling setup transactions */
+ struct usb_request *setup_req;
+
+ struct platform_device *pdev;
+ int irq;
+ void *addr;
+
+ unsigned state;
+ unsigned flags;
+
+ atomic_t configured;
+ atomic_t running;
+
+ struct dma_pool *pool;
+
+ /* dma page to back the queue heads and items */
+ unsigned char *buf;
+ dma_addr_t dma;
+
+ struct ept_queue_head *head;
+
+ /* used for allocation */
+ unsigned next_item;
+ unsigned next_ifc_num;
+
+ /* endpoints are ordered based on their status bits,
+ ** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15
+ */
+ struct msm_endpoint ept[32];
+
+
+ /* max power requested by selected configuration */
+ unsigned b_max_pow;
+ unsigned chg_current;
+ struct delayed_work chg_det;
+ struct delayed_work chg_stop;
+ struct msm_hsusb_gadget_platform_data *pdata;
+ struct work_struct phy_status_check;
+
+ struct work_struct work;
+ unsigned phy_status;
+ unsigned phy_fail_count;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct switch_dev sdev;
+
+#define ep0out ept[0]
+#define ep0in ept[16]
+
+ atomic_t ep0_dir;
+ atomic_t test_mode;
+ atomic_t offline_pending;
+ atomic_t softconnect;
+#ifdef CONFIG_USB_OTG
+ u8 hnp_avail;
+#endif
+
+ atomic_t remote_wakeup;
+ atomic_t self_powered;
+ struct delayed_work rw_work;
+
+ struct otg_transceiver *xceiv;
+ enum usb_device_state usb_state;
+ struct wake_lock wlock;
+};
+
+static const struct usb_ep_ops msm72k_ep_ops;
+static struct usb_info *the_usb_info;
+
+static int msm72k_wakeup(struct usb_gadget *_gadget);
+static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active);
+static int msm72k_set_halt(struct usb_ep *_ep, int value);
+static void flush_endpoint(struct msm_endpoint *ept);
+static void usb_reset(struct usb_info *ui);
+static int usb_ept_set_halt(struct usb_ep *_ep, int value);
+
+static void msm_hsusb_set_speed(struct usb_info *ui)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) {
+ case PORTSC_PSPD_FS:
+ dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_FULL\n");
+ ui->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSC_PSPD_LS:
+ dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_LOW\n");
+ ui->gadget.speed = USB_SPEED_LOW;
+ break;
+ case PORTSC_PSPD_HS:
+ dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_HIGH\n");
+ ui->gadget.speed = USB_SPEED_HIGH;
+ break;
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void msm_hsusb_set_state(enum usb_device_state state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&the_usb_info->lock, flags);
+ the_usb_info->usb_state = state;
+ spin_unlock_irqrestore(&the_usb_info->lock, flags);
+}
+
+static enum usb_device_state msm_hsusb_get_state(void)
+{
+ unsigned long flags;
+ enum usb_device_state state;
+
+ spin_lock_irqsave(&the_usb_info->lock, flags);
+ state = the_usb_info->usb_state;
+ spin_unlock_irqrestore(&the_usb_info->lock, flags);
+
+ return state;
+}
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+ return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+ return sprintf(buf, "%s\n", sdev->state ? "online" : "offline");
+}
+
+static inline enum chg_type usb_get_chg_type(struct usb_info *ui)
+{
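+	/* A dedicated wall charger typically shorts D+ and D-, so both
+	 * PORTSC line-state bits read high; anything else is treated as
+	 * a standard downstream port (SDP).
+	 */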
+ if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS)
+ return USB_CHG_TYPE__WALLCHARGER;
+ else
+ return USB_CHG_TYPE__SDP;
+}
+
+#define USB_WALLCHARGER_CHG_CURRENT 1800
+static int usb_get_max_power(struct usb_info *ui)
+{
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+ unsigned long flags;
+ enum chg_type temp;
+ int suspended;
+ int configured;
+ unsigned bmaxpow;
+
+ if (ui->gadget.is_a_peripheral)
+ return -EINVAL;
+
+ temp = atomic_read(&otg->chg_type);
+ spin_lock_irqsave(&ui->lock, flags);
+ suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0;
+ configured = atomic_read(&ui->configured);
+ bmaxpow = ui->b_max_pow;
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ if (temp == USB_CHG_TYPE__INVALID)
+ return -ENODEV;
+
+ if (temp == USB_CHG_TYPE__WALLCHARGER)
+ return USB_WALLCHARGER_CHG_CURRENT;
+
+ if (suspended || !configured)
+ return 0;
+
+ return bmaxpow;
+}
+
+static int usb_phy_stuck_check(struct usb_info *ui)
+{
+	/*
+	 * Write a test value (0xAA) into the scratch register (0x16) and
+	 * read it back. If the read value matches the written value, the
+	 * PHY is working normally; otherwise, the PHY appears to be stuck.
+	 */
+
+ if (otg_io_write(ui->xceiv, 0xAA, 0x16) == -1) {
+ dev_dbg(&ui->pdev->dev,
+ "%s(): ulpi write timeout\n", __func__);
+ return -EIO;
+ }
+
+ if (otg_io_read(ui->xceiv, 0x16) != 0xAA) {
+ dev_dbg(&ui->pdev->dev,
+ "%s(): read value is incorrect\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Check the PHY status by writing to and reading back the PHY scratch
+ * register. If the PHY is stuck, reset the HW to recover it.
+ */
+static void usb_phy_stuck_recover(struct work_struct *w)
+{
+ struct usb_info *ui = the_usb_info;
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ if (ui->gadget.speed != USB_SPEED_UNKNOWN ||
+ ui->usb_state == USB_STATE_NOTATTACHED ||
+ ui->driver == NULL) {
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ disable_irq(otg->irq);
+ if (usb_phy_stuck_check(ui)) {
+#ifdef CONFIG_USB_MSM_ACA
+ del_timer_sync(&otg->id_timer);
+#endif
+ ui->phy_fail_count++;
+ dev_err(&ui->pdev->dev,
+ "%s():PHY stuck, resetting HW\n", __func__);
+			/*
+			 * PHY seems to be stuck; reset the PHY and the HW
+			 * link to recover it
+			 */
+ usb_reset(ui);
+#ifdef CONFIG_USB_MSM_ACA
+ mod_timer(&otg->id_timer, jiffies +
+ msecs_to_jiffies(OTG_ID_POLL_MS));
+#endif
+ msm72k_pullup_internal(&ui->gadget, 1);
+ }
+ enable_irq(otg->irq);
+}
+
+static void usb_phy_status_check_timer(unsigned long data)
+{
+ struct usb_info *ui = the_usb_info;
+
+ schedule_work(&ui->phy_status_check);
+}
+
+static void usb_chg_stop(struct work_struct *w)
+{
+ struct usb_info *ui = container_of(w, struct usb_info, chg_stop.work);
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+ enum chg_type temp;
+
+ temp = atomic_read(&otg->chg_type);
+
+ if (temp == USB_CHG_TYPE__SDP)
+ otg_set_power(ui->xceiv, 0);
+}
+
+static void usb_chg_detect(struct work_struct *w)
+{
+ struct usb_info *ui = container_of(w, struct usb_info, chg_det.work);
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+ enum chg_type temp = USB_CHG_TYPE__INVALID;
+ unsigned long flags;
+ int maxpower;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ if (ui->usb_state == USB_STATE_NOTATTACHED) {
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return;
+ }
+
+ temp = usb_get_chg_type(ui);
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ atomic_set(&otg->chg_type, temp);
+ maxpower = usb_get_max_power(ui);
+ if (maxpower > 0)
+ otg_set_power(ui->xceiv, maxpower);
+
+	/* The USB driver prevents idle and suspend power collapse (pc)
+	 * while a USB cable is connected. When a dedicated charger is
+	 * connected, however, the driver can vote for idle and suspend pc.
+	 * The OTG driver handles idle pc as part of the otg_set_power call
+	 * above when a wall charger is attached. To allow suspend pc,
+	 * release the wakelock; it is re-acquired on any subsequent USB
+	 * interrupt.
+	 */
+ if (temp == USB_CHG_TYPE__WALLCHARGER) {
+ pm_runtime_put_sync(&ui->pdev->dev);
+ wake_unlock(&ui->wlock);
+ }
+}
+
+static int usb_ep_get_stall(struct msm_endpoint *ept)
+{
+ unsigned int n;
+ struct usb_info *ui = ept->ui;
+
+ n = readl(USB_ENDPTCTRL(ept->num));
+ if (ept->flags & EPT_FLAG_IN)
+ return (CTRL_TXS & n) ? 1 : 0;
+ else
+ return (CTRL_RXS & n) ? 1 : 0;
+}
+
+static void init_endpoints(struct usb_info *ui)
+{
+ unsigned n;
+
+ for (n = 0; n < 32; n++) {
+ struct msm_endpoint *ept = ui->ept + n;
+
+ ept->ui = ui;
+ ept->bit = n;
+ ept->num = n & 15;
+ ept->ep.name = ep_name[n];
+ ept->ep.ops = &msm72k_ep_ops;
+
+ if (ept->bit > 15) {
+ /* IN endpoint */
+ ept->head = ui->head + (ept->num << 1) + 1;
+ ept->flags = EPT_FLAG_IN;
+ } else {
+ /* OUT endpoint */
+ ept->head = ui->head + (ept->num << 1);
+ ept->flags = 0;
+ }
+
+ }
+}
+
+static void config_ept(struct msm_endpoint *ept)
+{
+ struct usb_info *ui = ept->ui;
+ unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT;
+
+ /* ep0 out needs interrupt-on-setup */
+ if (ept->bit == 0)
+ cfg |= CONFIG_IOS;
+
+ ept->head->config = cfg;
+ ept->head->next = TERMINATE;
+
+ if (ept->ep.maxpacket)
+ dev_dbg(&ui->pdev->dev,
+ "ept #%d %s max:%d head:%p bit:%d\n",
+ ept->num,
+ (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+ ept->ep.maxpacket, ept->head, ept->bit);
+}
+
+static void configure_endpoints(struct usb_info *ui)
+{
+ unsigned n;
+
+ for (n = 0; n < 32; n++)
+ config_ept(ui->ept + n);
+}
+
+struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept,
+ unsigned bufsize, gfp_t gfp_flags)
+{
+ struct usb_info *ui = ept->ui;
+ struct msm_request *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ goto fail1;
+
+ req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma);
+ if (!req->item)
+ goto fail2;
+
+ if (bufsize) {
+ req->req.buf = kmalloc(bufsize, gfp_flags);
+ if (!req->req.buf)
+ goto fail3;
+ req->alloced = 1;
+ }
+
+ return &req->req;
+
+fail3:
+ dma_pool_free(ui->pool, req->item, req->item_dma);
+fail2:
+ kfree(req);
+fail1:
+ return 0;
+}
+
+static void usb_ept_enable(struct msm_endpoint *ept, int yes,
+ unsigned char ep_type)
+{
+ struct usb_info *ui = ept->ui;
+ int in = ept->flags & EPT_FLAG_IN;
+ unsigned n;
+
+ n = readl(USB_ENDPTCTRL(ept->num));
+
+ if (in) {
+ if (yes) {
+ n = (n & (~CTRL_TXT_MASK)) |
+ (ep_type << CTRL_TXT_EP_TYPE_SHIFT);
+ n |= CTRL_TXE | CTRL_TXR;
+ } else
+ n &= (~CTRL_TXE);
+ } else {
+ if (yes) {
+ n = (n & (~CTRL_RXT_MASK)) |
+ (ep_type << CTRL_RXT_EP_TYPE_SHIFT);
+ n |= CTRL_RXE | CTRL_RXR;
+ } else
+ n &= ~(CTRL_RXE);
+ }
+	/* complete all the updates to ept->head before enabling the endpoint */
+ mb();
+ writel(n, USB_ENDPTCTRL(ept->num));
+
+ /* Ensure endpoint is enabled before returning */
+ mb();
+
+ dev_dbg(&ui->pdev->dev, "ept %d %s %s\n",
+ ept->num, in ? "in" : "out", yes ? "enabled" : "disabled");
+}
+
+static void usb_ept_start(struct msm_endpoint *ept)
+{
+ struct usb_info *ui = ept->ui;
+ struct msm_request *req = ept->req;
+ struct msm_request *f_req = ept->req;
+ unsigned n = 1 << ept->bit;
+ unsigned info;
+ int reprime_cnt = 0;
+
+ BUG_ON(req->live);
+
+ while (req) {
+ req->live = 1;
+ /* prepare the transaction descriptor item for the hardware */
+ req->item->info =
+ INFO_BYTES(req->req.length) | INFO_IOC | INFO_ACTIVE;
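+		/* Fill in the dTD buffer page pointers. Only page0..page3
+		 * are programmed here, which matches the 16KB (0x4000)
+		 * per-request limit enforced in usb_ept_queue_xfer().
+		 */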
+ req->item->page0 = req->dma;
+ req->item->page1 = (req->dma + 0x1000) & 0xfffff000;
+ req->item->page2 = (req->dma + 0x2000) & 0xfffff000;
+ req->item->page3 = (req->dma + 0x3000) & 0xfffff000;
+
+ if (req->next == NULL) {
+ req->item->next = TERMINATE;
+ break;
+ }
+ req->item->next = req->next->item_dma;
+ req = req->next;
+ }
+
+ rmb();
+ /* link the hw queue head to the request's transaction item */
+ ept->head->next = ept->req->item_dma;
+ ept->head->info = 0;
+
+reprime_ept:
+ /* flush buffers before priming ept */
+ mb();
+	/* During high-throughput testing it was observed that the
+	 * endpoint STAT bit is sometimes not set even though all the
+	 * data structures are updated properly and the endpoint PRIME
+	 * bit is set. To work around the issue, use the dTD INFO bit
+	 * to decide whether to re-prime.
+	 */
+ writel_relaxed(n, USB_ENDPTPRIME);
+ /* busy wait till endptprime gets clear */
+ while ((readl_relaxed(USB_ENDPTPRIME) & n))
+ ;
+ if (readl_relaxed(USB_ENDPTSTAT) & n)
+ return;
+
+ rmb();
+ info = f_req->item->info;
+ if (info & INFO_ACTIVE) {
+ if (reprime_cnt++ < 3)
+ goto reprime_ept;
+ else
+ pr_err("%s(): ept%d%s prime failed. ept: config: %x"
+ "active: %x next: %x info: %x\n"
+ " req@ %x next: %x info: %x\n",
+ __func__, ept->num,
+ ept->flags & EPT_FLAG_IN ? "in" : "out",
+ ept->head->config, ept->head->active,
+ ept->head->next, ept->head->info,
+ f_req->item_dma, f_req->item->next, info);
+ }
+}
+
+int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req)
+{
+ unsigned long flags;
+ struct msm_request *req = to_msm_request(_req);
+ struct msm_request *last;
+ struct usb_info *ui = ept->ui;
+ unsigned length = req->req.length;
+
+ if (length > 0x4000)
+ return -EMSGSIZE;
+
+ spin_lock_irqsave(&ui->lock, flags);
+
+ if (req->busy) {
+ req->req.status = -EBUSY;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ dev_err(&ui->pdev->dev,
+ "usb_ept_queue_xfer() tried to queue busy request\n");
+ return -EBUSY;
+ }
+
+ if (!atomic_read(&ui->configured) && (ept->num != 0)) {
+ req->req.status = -ESHUTDOWN;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ if (printk_ratelimit())
+ dev_err(&ui->pdev->dev,
+ "%s: called while offline\n", __func__);
+ return -ESHUTDOWN;
+ }
+
+ if (ui->usb_state == USB_STATE_SUSPENDED) {
+ if (!atomic_read(&ui->remote_wakeup)) {
+ req->req.status = -EAGAIN;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ if (printk_ratelimit())
+ dev_err(&ui->pdev->dev,
+ "%s: cannot queue as bus is suspended "
+ "ept #%d %s max:%d head:%p bit:%d\n",
+ __func__, ept->num,
+ (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+ ept->ep.maxpacket, ept->head, ept->bit);
+
+ return -EAGAIN;
+ }
+
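+		/* Bus is suspended but remote wakeup is enabled: hold a
+		 * wakelock, bring the transceiver out of low-power mode and
+		 * schedule a remote-wakeup attempt after a short delay.
+		 */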
+ wake_lock(&ui->wlock);
+ otg_set_suspend(ui->xceiv, 0);
+ schedule_delayed_work(&ui->rw_work, REMOTE_WAKEUP_DELAY);
+ }
+
+ req->busy = 1;
+ req->live = 0;
+ req->next = 0;
+ req->req.status = -EBUSY;
+
+ req->dma = dma_map_single(NULL, req->req.buf, length,
+ (ept->flags & EPT_FLAG_IN) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+
+ /* Add the new request to the end of the queue */
+ last = ept->last;
+ if (last) {
+		/* Requests are already in the queue. Add this one to the
+		 * end, but let the completion interrupt actually start
+		 * things going, to avoid hw issues.
+		 */
+ last->next = req;
+ req->prev = last;
+
+ } else {
+ /* queue was empty -- kick the hardware */
+ ept->req = req;
+ req->prev = NULL;
+ usb_ept_start(ept);
+ }
+ ept->last = req;
+
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return 0;
+}
+
+/* --- endpoint 0 handling --- */
+
+static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct msm_request *r = to_msm_request(req);
+ struct msm_endpoint *ept = to_msm_endpoint(ep);
+ struct usb_info *ui = ept->ui;
+
+ req->complete = r->gadget_complete;
+ r->gadget_complete = 0;
+ if (req->complete)
+ req->complete(&ui->ep0in.ep, req);
+}
+
+static void ep0_status_complete(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct usb_request *req = _req->context;
+ struct msm_request *r;
+ struct msm_endpoint *ept;
+ struct usb_info *ui;
+
+ pr_debug("%s:\n", __func__);
+ if (!req)
+ return;
+
+ r = to_msm_request(req);
+ ept = to_msm_endpoint(ep);
+ ui = ept->ui;
+ _req->context = 0;
+
+ req->complete = r->gadget_complete;
+ req->zero = 0;
+ r->gadget_complete = 0;
+ if (req->complete)
+ req->complete(&ui->ep0in.ep, req);
+
+}
+
+static void ep0_status_phase(struct usb_ep *ep, struct usb_request *req)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(ep);
+ struct usb_info *ui = ept->ui;
+
+ pr_debug("%s:\n", __func__);
+
+ req->length = 0;
+ req->complete = ep0_status_complete;
+
+ /* status phase */
+ if (atomic_read(&ui->ep0_dir) == USB_DIR_IN)
+ usb_ept_queue_xfer(&ui->ep0out, req);
+ else
+ usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0in_send_zero_leng_pkt(struct msm_endpoint *ept)
+{
+ struct usb_info *ui = ept->ui;
+ struct usb_request *req = ui->setup_req;
+
+ pr_debug("%s:\n", __func__);
+
+ req->length = 0;
+ req->complete = ep0_status_phase;
+ usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0_queue_ack_complete(struct usb_ep *ep,
+ struct usb_request *_req)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(ep);
+ struct usb_info *ui = ept->ui;
+ struct usb_request *req = ui->setup_req;
+
+ pr_debug("%s: _req:%p actual:%d length:%d zero:%d\n",
+ __func__, _req, _req->actual,
+ _req->length, _req->zero);
+
+ /* queue up the receive of the ACK response from the host */
+ if (_req->status == 0 && _req->actual == _req->length) {
+ req->context = _req;
+ if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) {
+ if (_req->zero && _req->length &&
+ !(_req->length % ep->maxpacket)) {
+ ep0in_send_zero_leng_pkt(&ui->ep0in);
+ return;
+ }
+ }
+ ep0_status_phase(ep, req);
+ } else
+ ep0_complete(ep, _req);
+}
+
+static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(ep);
+ struct usb_info *ui = ept->ui;
+ unsigned int temp;
+ int test_mode = atomic_read(&ui->test_mode);
+
+ if (!test_mode)
+ return;
+
+ switch (test_mode) {
+ case J_TEST:
+ dev_info(&ui->pdev->dev, "usb electrical test mode: (J)\n");
+ temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+ writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC);
+ break;
+
+ case K_TEST:
+ dev_info(&ui->pdev->dev, "usb electrical test mode: (K)\n");
+ temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+ writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC);
+ break;
+
+ case SE0_NAK_TEST:
+ dev_info(&ui->pdev->dev,
+ "usb electrical test mode: (SE0-NAK)\n");
+ temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+ writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC);
+ break;
+
+ case TST_PKT_TEST:
+ dev_info(&ui->pdev->dev,
+ "usb electrical test mode: (TEST_PKT)\n");
+ temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+ writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC);
+ break;
+ }
+}
+
+static void ep0_setup_ack(struct usb_info *ui)
+{
+ struct usb_request *req = ui->setup_req;
+ req->length = 0;
+ req->complete = ep0_setup_ack_complete;
+ usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0_setup_stall(struct usb_info *ui)
+{
+ writel((1<<16) | (1<<0), USB_ENDPTCTRL(0));
+}
+
+static void ep0_setup_send(struct usb_info *ui, unsigned length)
+{
+ struct usb_request *req = ui->setup_req;
+ struct msm_request *r = to_msm_request(req);
+ struct msm_endpoint *ept = &ui->ep0in;
+
+ req->length = length;
+ req->complete = ep0_queue_ack_complete;
+ r->gadget_complete = 0;
+ usb_ept_queue_xfer(ept, req);
+}
+
+static void handle_setup(struct usb_info *ui)
+{
+ struct usb_ctrlrequest ctl;
+ struct usb_request *req = ui->setup_req;
+ int ret;
+#ifdef CONFIG_USB_OTG
+ u8 hnp;
+ unsigned long flags;
+#endif
+
+ memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
+ /* Ensure buffer is read before acknowledging to h/w */
+ mb();
+
+ writel(EPT_RX(0), USB_ENDPTSETUPSTAT);
+
+ if (ctl.bRequestType & USB_DIR_IN)
+ atomic_set(&ui->ep0_dir, USB_DIR_IN);
+ else
+ atomic_set(&ui->ep0_dir, USB_DIR_OUT);
+
+ /* any pending ep0 transactions must be canceled */
+ flush_endpoint(&ui->ep0out);
+ flush_endpoint(&ui->ep0in);
+
+ dev_dbg(&ui->pdev->dev,
+ "setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n",
+ ctl.bRequestType, ctl.bRequest, ctl.wValue,
+ ctl.wIndex, ctl.wLength);
+
+ if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) ==
+ (USB_DIR_IN | USB_TYPE_STANDARD)) {
+ if (ctl.bRequest == USB_REQ_GET_STATUS) {
+ /* OTG supplement Rev 2.0 introduces another device
+ * GET_STATUS request for HNP polling with length = 1.
+ */
+ u8 len = 2;
+ switch (ctl.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_ENDPOINT:
+ {
+ struct msm_endpoint *ept;
+ unsigned num =
+ ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ u16 temp = 0;
+
+ if (num == 0) {
+ memset(req->buf, 0, 2);
+ break;
+ }
+ if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+ num += 16;
+ ept = &ui->ep0out + num;
+ temp = usb_ep_get_stall(ept);
+ temp = temp << USB_ENDPOINT_HALT;
+ memcpy(req->buf, &temp, 2);
+ break;
+ }
+ case USB_RECIP_DEVICE:
+ {
+ u16 temp = 0;
+
+ if (ctl.wIndex == OTG_STATUS_SELECTOR) {
+#ifdef CONFIG_USB_OTG
+ spin_lock_irqsave(&ui->lock, flags);
+ hnp = (ui->gadget.host_request <<
+ HOST_REQUEST_FLAG);
+ ui->hnp_avail = 1;
+ spin_unlock_irqrestore(&ui->lock,
+ flags);
+ memcpy(req->buf, &hnp, 1);
+ len = 1;
+#else
+ goto stall;
+#endif
+ } else {
+ temp = (atomic_read(&ui->self_powered)
+ << USB_DEVICE_SELF_POWERED);
+ temp |= (atomic_read(&ui->remote_wakeup)
+ << USB_DEVICE_REMOTE_WAKEUP);
+ memcpy(req->buf, &temp, 2);
+ }
+ break;
+ }
+ case USB_RECIP_INTERFACE:
+ memset(req->buf, 0, 2);
+ break;
+ default:
+ goto stall;
+ }
+ ep0_setup_send(ui, len);
+ return;
+ }
+ }
+ if (ctl.bRequestType ==
+ (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
+ if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) ||
+ (ctl.bRequest == USB_REQ_SET_FEATURE)) {
+ if ((ctl.wValue == 0) && (ctl.wLength == 0)) {
+ unsigned num = ctl.wIndex & 0x0f;
+
+ if (num != 0) {
+ struct msm_endpoint *ept;
+
+ if (ctl.wIndex & 0x80)
+ num += 16;
+ ept = &ui->ep0out + num;
+
+ if (ept->wedged)
+ goto ack;
+ if (ctl.bRequest == USB_REQ_SET_FEATURE)
+ usb_ept_set_halt(&ept->ep, 1);
+ else
+ usb_ept_set_halt(&ept->ep, 0);
+ }
+ goto ack;
+ }
+ }
+ }
+ if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) {
+ if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) {
+ atomic_set(&ui->configured, !!ctl.wValue);
+ msm_hsusb_set_state(USB_STATE_CONFIGURED);
+ } else if (ctl.bRequest == USB_REQ_SET_ADDRESS) {
+			/*
+			 * Gadget speed should be set when the port change
+			 * (PCI) interrupt occurs. Sometimes, however, the
+			 * PCI interrupt does not occur after reset, so
+			 * update the gadget speed here.
+			 */
+ if (ui->gadget.speed == USB_SPEED_UNKNOWN) {
+				dev_info(&ui->pdev->dev,
+					"PCI intr missed; setting speed explicitly\n");
+ msm_hsusb_set_speed(ui);
+ }
+ msm_hsusb_set_state(USB_STATE_ADDRESS);
+
+ /* write address delayed (will take effect
+ ** after the next IN txn)
+ */
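+			/* The controller appears to follow the ChipIdea
+			 * layout: the new address goes in bits 31:25 of
+			 * DEVICEADDR and bit 24 defers the update until the
+			 * status IN transaction completes.
+			 */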
+ writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
+ goto ack;
+ } else if (ctl.bRequest == USB_REQ_SET_FEATURE) {
+ switch (ctl.wValue) {
+ case USB_DEVICE_TEST_MODE:
+ switch (ctl.wIndex) {
+ case J_TEST:
+ case K_TEST:
+ case SE0_NAK_TEST:
+ case TST_PKT_TEST:
+ atomic_set(&ui->test_mode, ctl.wIndex);
+ goto ack;
+ }
+ goto stall;
+ case USB_DEVICE_REMOTE_WAKEUP:
+ atomic_set(&ui->remote_wakeup, 1);
+ goto ack;
+#ifdef CONFIG_USB_OTG
+ case USB_DEVICE_B_HNP_ENABLE:
+ ui->gadget.b_hnp_enable = 1;
+ goto ack;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+			/* B-devices compliant with OTG spec
+			 * Rev 2.0 are not required to
+			 * support these features.
+			 */
+ goto stall;
+#endif
+ }
+ } else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) &&
+ (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) {
+ atomic_set(&ui->remote_wakeup, 0);
+ goto ack;
+ }
+ }
+
+ /* delegate if we get here */
+ if (ui->driver) {
+ ret = ui->driver->setup(&ui->gadget, &ctl);
+ if (ret >= 0)
+ return;
+ }
+
+stall:
+ /* stall ep0 on error */
+ ep0_setup_stall(ui);
+ return;
+
+ack:
+ ep0_setup_ack(ui);
+}
+
+static void handle_endpoint(struct usb_info *ui, unsigned bit)
+{
+ struct msm_endpoint *ept = ui->ept + bit;
+ struct msm_request *req;
+ unsigned long flags;
+ unsigned info;
+
+ /*
+ INFO("handle_endpoint() %d %s req=%p(%08x)\n",
+ ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+ ept->req, ept->req ? ept->req->item_dma : 0);
+ */
+
+ /* expire all requests that are no longer active */
+ spin_lock_irqsave(&ui->lock, flags);
+ while ((req = ept->req)) {
+ /* if we've processed all live requests, time to
+ * restart the hardware on the next non-live request
+ */
+ if (!req->live) {
+ usb_ept_start(ept);
+ break;
+ }
+
+ /* clean speculative fetches on req->item->info */
+ dma_coherent_post_ops();
+ info = req->item->info;
+ /* if the transaction is still in-flight, stop here */
+ if (info & INFO_ACTIVE)
+ break;
+
+ /* advance ept queue to the next request */
+ ept->req = req->next;
+ if (ept->req == 0)
+ ept->last = 0;
+
+ dma_unmap_single(NULL, req->dma, req->req.length,
+ (ept->flags & EPT_FLAG_IN) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
+ /* XXX pass on more specific error code */
+ req->req.status = -EIO;
+ req->req.actual = 0;
+ dev_err(&ui->pdev->dev,
+ "ept %d %s error. info=%08x\n",
+ ept->num,
+ (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+ info);
+ } else {
+ req->req.status = 0;
+ req->req.actual =
+ req->req.length - ((info >> 16) & 0x7FFF);
+ }
+ req->busy = 0;
+ req->live = 0;
+
+ if (req->req.complete) {
+ spin_unlock_irqrestore(&ui->lock, flags);
+ req->req.complete(&ept->ep, &req->req);
+ spin_lock_irqsave(&ui->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
+{
+ /* flush endpoint, canceling transactions
+ ** - this can take a "large amount of time" (per databook)
+ ** - the flush can fail in some cases, thus we check STAT
+ ** and repeat if we're still operating
+ ** (does the fact that this doesn't use the tripwire matter?!)
+ */
+ do {
+ writel(bits, USB_ENDPTFLUSH);
+ while (readl(USB_ENDPTFLUSH) & bits)
+ udelay(100);
+ } while (readl(USB_ENDPTSTAT) & bits);
+}
+
+static void flush_endpoint_sw(struct msm_endpoint *ept)
+{
+ struct usb_info *ui = ept->ui;
+ struct msm_request *req, *next_req = NULL;
+ unsigned long flags;
+
+ /* inactive endpoints have nothing to do here */
+ if (ept->ep.maxpacket == 0)
+ return;
+
+ /* put the queue head in a sane state */
+ ept->head->info = 0;
+ ept->head->next = TERMINATE;
+
+ /* cancel any pending requests */
+ spin_lock_irqsave(&ui->lock, flags);
+ req = ept->req;
+ ept->req = 0;
+ ept->last = 0;
+ while (req != 0) {
+ req->busy = 0;
+ req->live = 0;
+ req->req.status = -ESHUTDOWN;
+ req->req.actual = 0;
+
+ /* Gadget driver may free the request in completion
+ * handler. So keep a copy of next req pointer
+ * before calling completion handler.
+ */
+ next_req = req->next;
+ if (req->req.complete) {
+ spin_unlock_irqrestore(&ui->lock, flags);
+ req->req.complete(&ept->ep, &req->req);
+ spin_lock_irqsave(&ui->lock, flags);
+ }
+ req = next_req;
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint(struct msm_endpoint *ept)
+{
+ flush_endpoint_hw(ept->ui, (1 << ept->bit));
+ flush_endpoint_sw(ept);
+}
+
+static irqreturn_t usb_interrupt(int irq, void *data)
+{
+ struct usb_info *ui = data;
+ unsigned n;
+ unsigned long flags;
+
+ n = readl(USB_USBSTS);
+ writel(n, USB_USBSTS);
+
+ /* somehow we got an IRQ while in the reset sequence: ignore it */
+ if (!atomic_read(&ui->running))
+ return IRQ_HANDLED;
+
+ if (n & STS_PCI) {
+ msm_hsusb_set_speed(ui);
+ if (atomic_read(&ui->configured)) {
+ wake_lock(&ui->wlock);
+
+ spin_lock_irqsave(&ui->lock, flags);
+ ui->usb_state = USB_STATE_CONFIGURED;
+ ui->flags = USB_FLAG_CONFIGURED;
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ ui->driver->resume(&ui->gadget);
+ schedule_work(&ui->work);
+ } else {
+ msm_hsusb_set_state(USB_STATE_DEFAULT);
+ }
+
+#ifdef CONFIG_USB_OTG
+ /* notify otg to clear A_BIDL_ADIS timer */
+ if (ui->gadget.is_a_peripheral)
+ otg_set_suspend(ui->xceiv, 0);
+#endif
+ }
+
+ if (n & STS_URI) {
+ dev_dbg(&ui->pdev->dev, "reset\n");
+ spin_lock_irqsave(&ui->lock, flags);
+ ui->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock_irqrestore(&ui->lock, flags);
+#ifdef CONFIG_USB_OTG
+ /* notify otg to clear A_BIDL_ADIS timer */
+ if (ui->gadget.is_a_peripheral)
+ otg_set_suspend(ui->xceiv, 0);
+ spin_lock_irqsave(&ui->lock, flags);
+ /* Host request is persistent across reset */
+ ui->gadget.b_hnp_enable = 0;
+ ui->hnp_avail = 0;
+ spin_unlock_irqrestore(&ui->lock, flags);
+#endif
+ msm_hsusb_set_state(USB_STATE_DEFAULT);
+ atomic_set(&ui->remote_wakeup, 0);
+ if (!ui->gadget.is_a_peripheral)
+ schedule_delayed_work(&ui->chg_stop, 0);
+
+ writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
+ writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
+ writel(0xffffffff, USB_ENDPTFLUSH);
+ writel(0, USB_ENDPTCTRL(1));
+
+ wake_lock(&ui->wlock);
+ if (atomic_read(&ui->configured)) {
+ /* marking us offline will cause ept queue attempts
+ ** to fail
+ */
+ atomic_set(&ui->configured, 0);
+ /* Defer sending offline uevent to userspace */
+ atomic_set(&ui->offline_pending, 1);
+
+ /* XXX: we can't seem to detect going offline,
+ * XXX: so deconfigure on reset for the time being
+ */
+ if (ui->driver) {
+ dev_dbg(&ui->pdev->dev,
+ "usb: notify offline\n");
+ ui->driver->disconnect(&ui->gadget);
+ }
+ /* cancel pending ep0 transactions */
+ flush_endpoint(&ui->ep0out);
+ flush_endpoint(&ui->ep0in);
+
+ }
+ /* Start phy stuck timer */
+ if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+ mod_timer(&phy_status_timer, PHY_STATUS_CHECK_DELAY);
+ }
+
+ if (n & STS_SLI) {
+ dev_dbg(&ui->pdev->dev, "suspend\n");
+
+ spin_lock_irqsave(&ui->lock, flags);
+ ui->usb_state = USB_STATE_SUSPENDED;
+ ui->flags = USB_FLAG_SUSPEND;
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ ui->driver->suspend(&ui->gadget);
+ schedule_work(&ui->work);
+#ifdef CONFIG_USB_OTG
+ /* notify otg for
+ * 1. kicking A_BIDL_ADIS timer in case of A-peripheral
+ * 2. disabling pull-up and kicking B_ASE0_RST timer
+ */
+ if (ui->gadget.b_hnp_enable || ui->gadget.is_a_peripheral)
+ otg_set_suspend(ui->xceiv, 1);
+#endif
+ }
+
+ if (n & STS_UI) {
+ n = readl(USB_ENDPTSETUPSTAT);
+ if (n & EPT_RX(0))
+ handle_setup(ui);
+
+ n = readl(USB_ENDPTCOMPLETE);
+ writel(n, USB_ENDPTCOMPLETE);
+ while (n) {
+ unsigned bit = __ffs(n);
+ handle_endpoint(ui, bit);
+ n = n & (~(1 << bit));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void usb_prepare(struct usb_info *ui)
+{
+ spin_lock_init(&ui->lock);
+
+ memset(ui->buf, 0, 4096);
+ ui->head = (void *) (ui->buf + 0);
+
+ /* only important for reset/reinit */
+ memset(ui->ept, 0, sizeof(ui->ept));
+ ui->next_item = 0;
+ ui->next_ifc_num = 0;
+
+ init_endpoints(ui);
+
+ ui->ep0in.ep.maxpacket = 64;
+ ui->ep0out.ep.maxpacket = 64;
+
+ ui->setup_req =
+ usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL);
+
+ INIT_WORK(&ui->work, usb_do_work);
+ INIT_DELAYED_WORK(&ui->chg_det, usb_chg_detect);
+ INIT_DELAYED_WORK(&ui->chg_stop, usb_chg_stop);
+ INIT_DELAYED_WORK(&ui->rw_work, usb_do_remote_wakeup);
+ if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+ INIT_WORK(&ui->phy_status_check, usb_phy_stuck_recover);
+}
+
+static void usb_reset(struct usb_info *ui)
+{
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+ dev_dbg(&ui->pdev->dev, "reset controller\n");
+
+ atomic_set(&ui->running, 0);
+
+ /*
+ * PHY reset takes minimum 100 msec. Hence reset only link
+ * during HNP. Reset PHY and link in B-peripheral mode.
+ */
+ if (ui->gadget.is_a_peripheral)
+ otg->reset(ui->xceiv, 0);
+ else
+ otg->reset(ui->xceiv, 1);
+
+	/* set the usb controller interrupt threshold to zero */
+ writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
+ USB_USBCMD);
+
+ writel(ui->dma, USB_ENDPOINTLISTADDR);
+
+ configure_endpoints(ui);
+
+ /* marking us offline will cause ept queue attempts to fail */
+ atomic_set(&ui->configured, 0);
+
+ if (ui->driver) {
+ dev_dbg(&ui->pdev->dev, "usb: notify offline\n");
+ ui->driver->disconnect(&ui->gadget);
+ }
+
+ /* cancel pending ep0 transactions */
+ flush_endpoint(&ui->ep0out);
+ flush_endpoint(&ui->ep0in);
+
+ /* enable interrupts */
+ writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
+
+ /* Ensure that h/w RESET is completed before returning */
+ mb();
+
+ atomic_set(&ui->running, 1);
+}
+
+static void usb_start(struct usb_info *ui)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ ui->flags |= USB_FLAG_START;
+ schedule_work(&ui->work);
+ spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static int usb_free(struct usb_info *ui, int ret)
+{
+ dev_dbg(&ui->pdev->dev, "usb_free(%d)\n", ret);
+
+ if (ui->xceiv)
+ otg_put_transceiver(ui->xceiv);
+
+ if (ui->irq)
+ free_irq(ui->irq, 0);
+ if (ui->pool)
+ dma_pool_destroy(ui->pool);
+ if (ui->dma)
+ dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
+ kfree(ui);
+ return ret;
+}
+
+static void usb_do_work_check_vbus(struct usb_info *ui)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ui->lock, iflags);
+ if (is_usb_online(ui))
+ ui->flags |= USB_FLAG_VBUS_ONLINE;
+ else
+ ui->flags |= USB_FLAG_VBUS_OFFLINE;
+ spin_unlock_irqrestore(&ui->lock, iflags);
+}
+
+static void usb_do_work(struct work_struct *w)
+{
+ struct usb_info *ui = container_of(w, struct usb_info, work);
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+ unsigned long iflags;
+ unsigned flags, _vbus;
+
+ for (;;) {
+ spin_lock_irqsave(&ui->lock, iflags);
+ flags = ui->flags;
+ ui->flags = 0;
+ _vbus = is_usb_online(ui);
+ spin_unlock_irqrestore(&ui->lock, iflags);
+
+ /* give up if we have nothing to do */
+ if (flags == 0)
+ break;
+
+ switch (ui->state) {
+ case USB_STATE_IDLE:
+ if (flags & USB_FLAG_START) {
+ int ret;
+
+ if (!_vbus) {
+ ui->state = USB_STATE_OFFLINE;
+ break;
+ }
+
+ pm_runtime_get_noresume(&ui->pdev->dev);
+ pm_runtime_resume(&ui->pdev->dev);
+ dev_dbg(&ui->pdev->dev,
+ "msm72k_udc: IDLE -> ONLINE\n");
+ usb_reset(ui);
+ ret = request_irq(otg->irq, usb_interrupt,
+ IRQF_SHARED,
+ ui->pdev->name, ui);
+				/* FIXME: should we call BUG_ON when
+				 * request_irq fails?
+				 */
+ if (ret) {
+ dev_err(&ui->pdev->dev,
+ "hsusb: peripheral: request irq"
+ " failed:(%d)", ret);
+ break;
+ }
+ ui->irq = otg->irq;
+ ui->state = USB_STATE_ONLINE;
+ usb_do_work_check_vbus(ui);
+
+ if (!atomic_read(&ui->softconnect))
+ break;
+
+ msm72k_pullup_internal(&ui->gadget, 1);
+
+ if (!ui->gadget.is_a_peripheral)
+ schedule_delayed_work(
+ &ui->chg_det,
+ USB_CHG_DET_DELAY);
+
+ }
+ break;
+ case USB_STATE_ONLINE:
+ if (atomic_read(&ui->offline_pending)) {
+ switch_set_state(&ui->sdev, 0);
+ atomic_set(&ui->offline_pending, 0);
+ }
+
+ /* If at any point when we were online, we received
+ * the signal to go offline, we must honor it
+ */
+ if (flags & USB_FLAG_VBUS_OFFLINE) {
+
+ ui->chg_current = 0;
+				/* wait in case charger detection work is still running */
+ if (!ui->gadget.is_a_peripheral)
+ cancel_delayed_work_sync(&ui->chg_det);
+
+ dev_dbg(&ui->pdev->dev,
+ "msm72k_udc: ONLINE -> OFFLINE\n");
+
+ atomic_set(&ui->running, 0);
+ atomic_set(&ui->remote_wakeup, 0);
+ atomic_set(&ui->configured, 0);
+
+ if (ui->driver) {
+ dev_dbg(&ui->pdev->dev,
+ "usb: notify offline\n");
+ ui->driver->disconnect(&ui->gadget);
+ }
+ /* cancel pending ep0 transactions */
+ flush_endpoint(&ui->ep0out);
+ flush_endpoint(&ui->ep0in);
+
+ /* synchronize with irq context */
+ spin_lock_irqsave(&ui->lock, iflags);
+#ifdef CONFIG_USB_OTG
+ ui->gadget.host_request = 0;
+ ui->gadget.b_hnp_enable = 0;
+ ui->hnp_avail = 0;
+#endif
+ msm72k_pullup_internal(&ui->gadget, 0);
+ spin_unlock_irqrestore(&ui->lock, iflags);
+
+
+			/* If the charger was initialized to a known type,
+			 * we must let the modem know about the charger
+			 * disconnection.
+			 */
+ otg_set_power(ui->xceiv, 0);
+
+ if (ui->irq) {
+ free_irq(ui->irq, ui);
+ ui->irq = 0;
+ }
+
+
+ switch_set_state(&ui->sdev, 0);
+
+ ui->state = USB_STATE_OFFLINE;
+ usb_do_work_check_vbus(ui);
+ pm_runtime_put_noidle(&ui->pdev->dev);
+ pm_runtime_suspend(&ui->pdev->dev);
+ wake_unlock(&ui->wlock);
+ break;
+ }
+ if (flags & USB_FLAG_SUSPEND) {
+ int maxpower = usb_get_max_power(ui);
+
+ if (maxpower < 0)
+ break;
+
+ otg_set_power(ui->xceiv, 0);
+				/* To support TCXO shutdown during bus suspend.
+				 * This may be a redundant check, since bus
+				 * suspend is not implemented as of now.
+				 */
+ if (release_wlocks)
+ wake_unlock(&ui->wlock);
+
+ /* TBD: Initiate LPM at usb bus suspend */
+ break;
+ }
+ if (flags & USB_FLAG_CONFIGURED) {
+ int maxpower = usb_get_max_power(ui);
+
+ /* We may come here even when no configuration
+ * is selected. Send online/offline event
+ * accordingly.
+ */
+ switch_set_state(&ui->sdev,
+ atomic_read(&ui->configured));
+
+ if (maxpower < 0)
+ break;
+
+ ui->chg_current = maxpower;
+ otg_set_power(ui->xceiv, maxpower);
+ break;
+ }
+ if (flags & USB_FLAG_RESET) {
+ dev_dbg(&ui->pdev->dev,
+ "msm72k_udc: ONLINE -> RESET\n");
+ msm72k_pullup_internal(&ui->gadget, 0);
+ usb_reset(ui);
+ msm72k_pullup_internal(&ui->gadget, 1);
+ dev_dbg(&ui->pdev->dev,
+ "msm72k_udc: RESET -> ONLINE\n");
+ break;
+ }
+ break;
+ case USB_STATE_OFFLINE:
+ /* If we were signaled to go online and vbus is still
+ * present when we received the signal, go online.
+ */
+ if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) {
+ int ret;
+
+ pm_runtime_get_noresume(&ui->pdev->dev);
+ pm_runtime_resume(&ui->pdev->dev);
+ dev_dbg(&ui->pdev->dev,
+ "msm72k_udc: OFFLINE -> ONLINE\n");
+
+ usb_reset(ui);
+ ui->state = USB_STATE_ONLINE;
+ usb_do_work_check_vbus(ui);
+ ret = request_irq(otg->irq, usb_interrupt,
+ IRQF_SHARED,
+ ui->pdev->name, ui);
+				/* FIXME: should we call BUG_ON when
+				 * request_irq fails?
+				 */
+ if (ret) {
+ dev_err(&ui->pdev->dev,
+ "hsusb: peripheral: request irq"
+ " failed:(%d)", ret);
+ break;
+ }
+ ui->irq = otg->irq;
+ enable_irq_wake(otg->irq);
+
+ if (!atomic_read(&ui->softconnect))
+ break;
+ msm72k_pullup_internal(&ui->gadget, 1);
+
+ if (!ui->gadget.is_a_peripheral)
+ schedule_delayed_work(
+ &ui->chg_det,
+ USB_CHG_DET_DELAY);
+ }
+ break;
+ }
+ }
+}
+
+/* FIXME - the callers of this function should use a gadget API instead.
+ * This is called from htc_battery.c and board-halibut.c
+ * WARNING - this can get called before this driver is initialized.
+ */
+void msm_hsusb_set_vbus_state(int online)
+{
+ unsigned long flags;
+ struct usb_info *ui = the_usb_info;
+
+ if (!ui) {
+ pr_err("%s called before driver initialized\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ui->lock, flags);
+
+ if (is_usb_online(ui) == online)
+ goto out;
+
+ if (online) {
+ ui->usb_state = USB_STATE_POWERED;
+ ui->flags |= USB_FLAG_VBUS_ONLINE;
+ } else {
+ ui->gadget.speed = USB_SPEED_UNKNOWN;
+ ui->usb_state = USB_STATE_NOTATTACHED;
+ ui->flags |= USB_FLAG_VBUS_OFFLINE;
+ }
+ if (in_interrupt()) {
+ schedule_work(&ui->work);
+ } else {
+ spin_unlock_irqrestore(&ui->lock, flags);
+ usb_do_work(&ui->work);
+ return;
+ }
+out:
+ spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+void usb_function_reenumerate(void)
+{
+ struct usb_info *ui = the_usb_info;
+
+ /* disable and re-enable the D+ pullup */
+ dev_dbg(&ui->pdev->dev, "disable pullup\n");
+ writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+
+ msleep(10);
+
+ dev_dbg(&ui->pdev->dev, "enable pullup\n");
+ writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+}
+
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct usb_info *ui = file->private_data;
+ char *buf = debug_buffer;
+ unsigned long flags;
+ struct msm_endpoint *ept;
+ struct msm_request *req;
+ int n;
+ int i = 0;
+
+ spin_lock_irqsave(&ui->lock, flags);
+
+ i += scnprintf(buf + i, PAGE_SIZE - i,
+ "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
+ readl(USB_ENDPTSETUPSTAT),
+ readl(USB_ENDPTPRIME),
+ readl(USB_ENDPTSTAT),
+ readl(USB_ENDPTCOMPLETE));
+ i += scnprintf(buf + i, PAGE_SIZE - i,
+ "regs: cmd=%08x sts=%08x intr=%08x port=%08x\n\n",
+ readl(USB_USBCMD),
+ readl(USB_USBSTS),
+ readl(USB_USBINTR),
+ readl(USB_PORTSC));
+
+
+ for (n = 0; n < 32; n++) {
+ ept = ui->ept + n;
+ if (ept->ep.maxpacket == 0)
+ continue;
+
+ i += scnprintf(buf + i, PAGE_SIZE - i,
+ "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
+ ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
+ ept->head->config, ept->head->active,
+ ept->head->next, ept->head->info);
+
+ for (req = ept->req; req; req = req->next)
+ i += scnprintf(buf + i, PAGE_SIZE - i,
+ " req @%08x next=%08x info=%08x page0=%08x %c %c\n",
+ req->item_dma, req->item->next,
+ req->item->info, req->item->page0,
+ req->busy ? 'B' : ' ',
+ req->live ? 'L' : ' ');
+ }
+
+ i += scnprintf(buf + i, PAGE_SIZE - i,
+ "phy failure count: %d\n", ui->phy_fail_count);
+
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, i);
+}
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct usb_info *ui = file->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ ui->flags |= USB_FLAG_RESET;
+ schedule_work(&ui->work);
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ return count;
+}
+
+static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ usb_function_reenumerate();
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+const struct file_operations debug_stat_ops = {
+ .open = debug_open,
+ .read = debug_read_status,
+};
+
+const struct file_operations debug_reset_ops = {
+ .open = debug_open,
+ .write = debug_write_reset,
+};
+
+const struct file_operations debug_cycle_ops = {
+ .open = debug_open,
+ .write = debug_write_cycle,
+};
+
+static ssize_t debug_read_release_wlocks(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char kbuf[10];
+ size_t c = 0;
+
+ memset(kbuf, 0, 10);
+
+ c = scnprintf(kbuf, 10, "%d", release_wlocks);
+
+ if (copy_to_user(ubuf, kbuf, c))
+ return -EFAULT;
+
+ return c;
+}
+static ssize_t debug_write_release_wlocks(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ char kbuf[10];
+ long temp;
+
+ memset(kbuf, 0, 10);
+
+ if (copy_from_user(kbuf, buf, count > 10 ? 10 : count))
+ return -EFAULT;
+
+ if (strict_strtol(kbuf, 10, &temp))
+ return -EINVAL;
+
+ if (temp)
+ release_wlocks = 1;
+ else
+ release_wlocks = 0;
+
+ return count;
+}
+static int debug_wake_lock_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+const struct file_operations debug_wlocks_ops = {
+ .open = debug_wake_lock_open,
+ .read = debug_read_release_wlocks,
+ .write = debug_write_release_wlocks,
+};
+static void usb_debugfs_init(struct usb_info *ui)
+{
+ struct dentry *dent;
+ dent = debugfs_create_dir(dev_name(&ui->pdev->dev), 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops);
+ debugfs_create_file("reset", 0222, dent, ui, &debug_reset_ops);
+ debugfs_create_file("cycle", 0222, dent, ui, &debug_cycle_ops);
+ debugfs_create_file("release_wlocks", 0666, dent, ui,
+ &debug_wlocks_ops);
+}
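+
+/* Illustrative usage from a shell with debugfs mounted (the directory name
+ * is dev_name() of the platform device, so the exact path may differ):
+ *
+ *   cat /sys/kernel/debug/msm_hsusb/status        # dump ept/register state
+ *   echo 1 > /sys/kernel/debug/msm_hsusb/reset    # force a controller reset
+ *   echo 1 > /sys/kernel/debug/msm_hsusb/cycle    # toggle the D+ pull-up
+ */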
+#else
+static void usb_debugfs_init(struct usb_info *ui) {}
+#endif
+
+static int
+msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(_ep);
+ unsigned char ep_type =
+ desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ _ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ config_ept(ept);
+ ept->wedged = 0;
+ usb_ept_enable(ept, 1, ep_type);
+ return 0;
+}
+
+static int msm72k_disable(struct usb_ep *_ep)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(_ep);
+
+ usb_ept_enable(ept, 0, 0);
+ flush_endpoint(ept);
+ return 0;
+}
+
+static struct usb_request *
+msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags);
+}
+
+static void
+msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct msm_request *req = to_msm_request(_req);
+ struct msm_endpoint *ept = to_msm_endpoint(_ep);
+ struct usb_info *ui = ept->ui;
+
+ /* request should not be busy */
+ BUG_ON(req->busy);
+ if (req->alloced)
+ kfree(req->req.buf);
+ dma_pool_free(ui->pool, req->item, req->item_dma);
+ kfree(req);
+}
+
+static int
+msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags)
+{
+ struct msm_endpoint *ep = to_msm_endpoint(_ep);
+ struct usb_info *ui = ep->ui;
+
+ if (ep == &ui->ep0in) {
+ struct msm_request *r = to_msm_request(req);
+ if (!req->length)
+ goto ep_queue_done;
+ r->gadget_complete = req->complete;
+		/* ep0_queue_ack_complete queues a receive for the ACK before
+		** calling req->complete
+		*/
+ req->complete = ep0_queue_ack_complete;
+ if (atomic_read(&ui->ep0_dir) == USB_DIR_OUT)
+ ep = &ui->ep0out;
+ goto ep_queue_done;
+ }
+
+ep_queue_done:
+ return usb_ept_queue_xfer(ep, req);
+}
+
+static int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct msm_endpoint *ep = to_msm_endpoint(_ep);
+ struct msm_request *req = to_msm_request(_req);
+ struct usb_info *ui = ep->ui;
+
+ struct msm_request *temp_req;
+ unsigned long flags;
+
+ if (!(ui && req && ep->req))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ if (!req->busy) {
+ dev_dbg(&ui->pdev->dev, "%s: !req->busy\n", __func__);
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return -EINVAL;
+ }
+ /* Stop the transfer */
+ do {
+ writel((1 << ep->bit), USB_ENDPTFLUSH);
+ while (readl(USB_ENDPTFLUSH) & (1 << ep->bit))
+ udelay(100);
+ } while (readl(USB_ENDPTSTAT) & (1 << ep->bit));
+
+ req->req.status = 0;
+ req->busy = 0;
+
+ if (ep->req == req) {
+ ep->req = req->next;
+ ep->head->next = req->item->next;
+ } else {
+ req->prev->next = req->next;
+ if (req->next)
+ req->next->prev = req->prev;
+ req->prev->item->next = req->item->next;
+ }
+
+ if (!req->next)
+ ep->last = req->prev;
+
+ /* initialize request to default */
+ req->item->next = TERMINATE;
+ req->item->info = 0;
+ req->live = 0;
+ dma_unmap_single(NULL, req->dma, req->req.length,
+ (ep->flags & EPT_FLAG_IN) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (req->req.complete) {
+ req->req.status = -ECONNRESET;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ req->req.complete(&ep->ep, &req->req);
+ spin_lock_irqsave(&ui->lock, flags);
+ }
+
+ if (!req->live) {
+ /* Reprime the endpoint for the remaining transfers */
+ for (temp_req = ep->req ; temp_req ; temp_req = temp_req->next)
+ temp_req->live = 0;
+ if (ep->req)
+ usb_ept_start(ep);
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return 0;
+}
+
+static int
+usb_ept_set_halt(struct usb_ep *_ep, int value)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(_ep);
+ struct usb_info *ui = ept->ui;
+ unsigned int in = ept->flags & EPT_FLAG_IN;
+ unsigned int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui->lock, flags);
+
+ n = readl(USB_ENDPTCTRL(ept->num));
+
+ if (in) {
+ if (value)
+ n |= CTRL_TXS;
+ else {
+ n &= ~CTRL_TXS;
+ n |= CTRL_TXR;
+ }
+ } else {
+ if (value)
+ n |= CTRL_RXS;
+ else {
+ n &= ~CTRL_RXS;
+ n |= CTRL_RXR;
+ }
+ }
+ writel(n, USB_ENDPTCTRL(ept->num));
+ if (!value)
+ ept->wedged = 0;
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ return 0;
+}
+
+static int
+msm72k_set_halt(struct usb_ep *_ep, int value)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(_ep);
+ unsigned int in = ept->flags & EPT_FLAG_IN;
+
+ if (value && in && ept->req)
+ return -EAGAIN;
+
+ usb_ept_set_halt(_ep, value);
+
+ return 0;
+}
+
+static int
+msm72k_fifo_status(struct usb_ep *_ep)
+{
+ return -EOPNOTSUPP;
+}
+
+static void
+msm72k_fifo_flush(struct usb_ep *_ep)
+{
+ flush_endpoint(to_msm_endpoint(_ep));
+}
+static int msm72k_set_wedge(struct usb_ep *_ep)
+{
+ struct msm_endpoint *ept = to_msm_endpoint(_ep);
+
+ if (ept->num == 0)
+ return -EINVAL;
+
+ ept->wedged = 1;
+
+ return msm72k_set_halt(_ep, 1);
+}
+
+static const struct usb_ep_ops msm72k_ep_ops = {
+ .enable = msm72k_enable,
+ .disable = msm72k_disable,
+
+ .alloc_request = msm72k_alloc_request,
+ .free_request = msm72k_free_request,
+
+ .queue = msm72k_queue,
+ .dequeue = msm72k_dequeue,
+
+ .set_halt = msm72k_set_halt,
+ .set_wedge = msm72k_set_wedge,
+ .fifo_status = msm72k_fifo_status,
+ .fifo_flush = msm72k_fifo_flush,
+};
+
+static int msm72k_get_frame(struct usb_gadget *_gadget)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+
+ /* frame number is in bits 13:3 */
+ return (readl(USB_FRINDEX) >> 3) & 0x000007FF;
+}
+
+/* VBUS reporting logically comes from a transceiver */
+static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+ if (is_active || atomic_read(&otg->chg_type)
+ == USB_CHG_TYPE__WALLCHARGER)
+ wake_lock(&ui->wlock);
+
+ msm_hsusb_set_vbus_state(is_active);
+ return 0;
+}
+
+/* SW workarounds
+ * Issue #1 - USB Spoof Disconnect Failure
+ * Symptom - Writing 0 to the run/stop bit of USBCMD does not cause a
+ *           disconnect.
+ * SW workaround - Set opmode to non-driving and set SuspendM in the
+ *                 function register of the SMSC PHY.
+ */
+/* drivers may have software control over D+ pullup */
+static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ unsigned long flags;
+
+ if (is_active) {
+ spin_lock_irqsave(&ui->lock, flags);
+ if (is_usb_online(ui) && ui->driver)
+ writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+ spin_unlock_irqrestore(&ui->lock, flags);
+ } else {
+ writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+ /* S/W workaround, Issue#1 */
+ otg_io_write(ui->xceiv, 0x48, 0x04);
+ }
+
+ /* Ensure pull-up operation is completed before returning */
+ mb();
+
+ return 0;
+}
+
+static int msm72k_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ unsigned long flags;
+
+
+ atomic_set(&ui->softconnect, is_active);
+
+ spin_lock_irqsave(&ui->lock, flags);
+ if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL) {
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ msm72k_pullup_internal(_gadget, is_active);
+
+ if (is_active && !ui->gadget.is_a_peripheral)
+ schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY);
+
+ return 0;
+}
+
+static int msm72k_wakeup(struct usb_gadget *_gadget)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+ if (!atomic_read(&ui->remote_wakeup)) {
+ dev_err(&ui->pdev->dev,
+ "%s: remote wakeup not supported\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ if (!atomic_read(&ui->configured)) {
+ dev_err(&ui->pdev->dev,
+ "%s: device is not configured\n", __func__);
+ return -ENODEV;
+ }
+ otg_set_suspend(ui->xceiv, 0);
+
+ disable_irq(otg->irq);
+
+ if (!is_usb_active())
+ writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+
+ /* Ensure that USB port is resumed before enabling the IRQ */
+ mb();
+
+ enable_irq(otg->irq);
+
+ return 0;
+}
+
+/* When the gadget is configured, it indicates how much power
+ * can be drawn from VBUS, as specified in the configuration descriptor.
+ */
+static int msm72k_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&ui->lock, flags);
+ ui->b_max_pow = mA;
+ ui->flags = USB_FLAG_CONFIGURED;
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ schedule_work(&ui->work);
+
+ return 0;
+}
+
+static int msm72k_set_selfpowered(struct usb_gadget *_gadget, int set)
+{
+ struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ if (set) {
+ if (ui->pdata && ui->pdata->self_powered)
+ atomic_set(&ui->self_powered, 1);
+ else
+ ret = -EOPNOTSUPP;
+ } else {
+ /* We can always work as a bus powered device */
+ atomic_set(&ui->self_powered, 0);
+ }
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ return ret;
+
+}
+
+static const struct usb_gadget_ops msm72k_ops = {
+ .get_frame = msm72k_get_frame,
+ .vbus_session = msm72k_udc_vbus_session,
+ .vbus_draw = msm72k_udc_vbus_draw,
+ .pullup = msm72k_pullup,
+ .wakeup = msm72k_wakeup,
+ .set_selfpowered = msm72k_set_selfpowered,
+};
+
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+ struct usb_info *ui = the_usb_info;
+
+ msm72k_wakeup(&ui->gadget);
+}
+
+static ssize_t usb_remote_wakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_info *ui = the_usb_info;
+
+ msm72k_wakeup(&ui->gadget);
+
+ return count;
+}
+
+static ssize_t show_usb_state(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ size_t i;
+ char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED",
+ "USB_STATE_POWERED", "USB_STATE_UNAUTHENTICATED",
+ "USB_STATE_RECONNECTING", "USB_STATE_DEFAULT",
+ "USB_STATE_ADDRESS", "USB_STATE_CONFIGURED",
+ "USB_STATE_SUSPENDED"
+ };
+
+ i = scnprintf(buf, PAGE_SIZE, "%s\n", state[msm_hsusb_get_state()]);
+ return i;
+}
+
+static ssize_t show_usb_speed(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_info *ui = the_usb_info;
+ size_t i;
+ char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW",
+ "USB_SPEED_FULL", "USB_SPEED_HIGH"};
+
+ i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->gadget.speed]);
+ return i;
+}
+
+static ssize_t store_usb_chg_current(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_info *ui = the_usb_info;
+ unsigned long mA;
+
+ if (ui->gadget.is_a_peripheral)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &mA))
+ return -EINVAL;
+
+ ui->chg_current = mA;
+ otg_set_power(ui->xceiv, mA);
+
+ return count;
+}
+
+static ssize_t show_usb_chg_current(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_info *ui = the_usb_info;
+ size_t count;
+
+ count = sprintf(buf, "%d", ui->chg_current);
+
+ return count;
+}
+
+static ssize_t show_usb_chg_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_info *ui = the_usb_info;
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
+ size_t count;
+ char *chg_type[] = {"STD DOWNSTREAM PORT",
+ "CARKIT",
+ "DEDICATED CHARGER",
+ "INVALID"};
+
+ count = sprintf(buf, "%s",
+ chg_type[atomic_read(&otg->chg_type)]);
+
+ return count;
+}
+static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup);
+static DEVICE_ATTR(usb_state, S_IRUSR, show_usb_state, 0);
+static DEVICE_ATTR(usb_speed, S_IRUSR, show_usb_speed, 0);
+static DEVICE_ATTR(chg_type, S_IRUSR, show_usb_chg_type, 0);
+static DEVICE_ATTR(chg_current, S_IWUSR | S_IRUSR,
+ show_usb_chg_current, store_usb_chg_current);
+
+#ifdef CONFIG_USB_OTG
+static ssize_t store_host_req(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_info *ui = the_usb_info;
+ unsigned long val, flags;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ dev_dbg(&ui->pdev->dev, "%s host request\n",
+ val ? "set" : "clear");
+
+ spin_lock_irqsave(&ui->lock, flags);
+ if (ui->hnp_avail)
+ ui->gadget.host_request = !!val;
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR(host_request, S_IWUSR, NULL, store_host_req);
+
+/* How do we notify user space about HNP availability?
+ * As we are compliant with Rev 2.0, the host will not set a_hnp_support.
+ * Introduce the hnp_avail flag and set it when an HNP polling request
+ * arrives. The expectation is that user space checks HNP availability
+ * before requesting the host role via the above sysfs node.
+ */
+static ssize_t show_host_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_info *ui = the_usb_info;
+ size_t count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui->lock, flags);
+ count = sprintf(buf, "%d\n", ui->hnp_avail);
+ spin_unlock_irqrestore(&ui->lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR(host_avail, S_IRUSR, show_host_avail, NULL);
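+
+/* Illustrative user-space sequence (the sysfs path depends on the platform
+ * device name):
+ *
+ *   cat /sys/devices/platform/msm_hsusb/otg/host_avail   # 1 => HNP polled
+ *   echo 1 > /sys/devices/platform/msm_hsusb/otg/host_request
+ */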
+
+static struct attribute *otg_attrs[] = {
+ &dev_attr_host_request.attr,
+ &dev_attr_host_avail.attr,
+ NULL,
+};
+
+static struct attribute_group otg_attr_grp = {
+ .name = "otg",
+ .attrs = otg_attrs,
+};
+#endif
+
+static int msm72k_probe(struct platform_device *pdev)
+{
+ struct usb_info *ui;
+ struct msm_otg *otg;
+ int retval;
+
+ dev_dbg(&pdev->dev, "msm72k_probe\n");
+ ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
+ if (!ui)
+ return -ENOMEM;
+
+ ui->pdev = pdev;
+ ui->pdata = pdev->dev.platform_data;
+
+ ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
+ if (!ui->buf)
+ return usb_free(ui, -ENOMEM);
+
+ ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0);
+ if (!ui->pool)
+ return usb_free(ui, -ENOMEM);
+
+ ui->xceiv = otg_get_transceiver();
+ if (!ui->xceiv)
+ return usb_free(ui, -ENODEV);
+
+ otg = to_msm_otg(ui->xceiv);
+ ui->addr = otg->regs;
+
+ ui->gadget.ops = &msm72k_ops;
+ ui->gadget.is_dualspeed = 1;
+ device_initialize(&ui->gadget.dev);
+ dev_set_name(&ui->gadget.dev, "gadget");
+ ui->gadget.dev.parent = &pdev->dev;
+ ui->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+#ifdef CONFIG_USB_OTG
+ ui->gadget.is_otg = 1;
+#endif
+
+ ui->sdev.name = DRIVER_NAME;
+ ui->sdev.print_name = print_switch_name;
+ ui->sdev.print_state = print_switch_state;
+
+ retval = switch_dev_register(&ui->sdev);
+ if (retval)
+ return usb_free(ui, retval);
+
+ the_usb_info = ui;
+
+ wake_lock_init(&ui->wlock,
+ WAKE_LOCK_SUSPEND, "usb_bus_active");
+
+ usb_debugfs_init(ui);
+
+ usb_prepare(ui);
+
+#ifdef CONFIG_USB_OTG
+ retval = sysfs_create_group(&pdev->dev.kobj, &otg_attr_grp);
+ if (retval) {
+		dev_err(&ui->pdev->dev,
+			"failed to create otg sysfs directory: err:(%d)\n",
+			retval);
+ }
+#endif
+
+ retval = otg_set_peripheral(ui->xceiv, &ui->gadget);
+ if (retval) {
+ dev_err(&ui->pdev->dev,
+ "%s: Cannot bind the transceiver, retval:(%d)\n",
+ __func__, retval);
+ switch_dev_unregister(&ui->sdev);
+ wake_lock_destroy(&ui->wlock);
+ return usb_free(ui, retval);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* Setup phy stuck timer */
+ if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+ setup_timer(&phy_status_timer, usb_phy_status_check_timer, 0);
+ return 0;
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *))
+{
+ struct usb_info *ui = the_usb_info;
+ int retval, n;
+
+ if (!driver
+ || driver->speed < USB_SPEED_FULL
+ || !bind
+ || !driver->disconnect
+ || !driver->setup)
+ return -EINVAL;
+ if (!ui)
+ return -ENODEV;
+ if (ui->driver)
+ return -EBUSY;
+
+ /* first hook up the driver ... */
+ ui->driver = driver;
+ ui->gadget.dev.driver = &driver->driver;
+ ui->gadget.name = driver_name;
+ INIT_LIST_HEAD(&ui->gadget.ep_list);
+ ui->gadget.ep0 = &ui->ep0in.ep;
+ INIT_LIST_HEAD(&ui->gadget.ep0->ep_list);
+ ui->gadget.speed = USB_SPEED_UNKNOWN;
+ atomic_set(&ui->softconnect, 1);
+
+ for (n = 1; n < 16; n++) {
+ struct msm_endpoint *ept = ui->ept + n;
+ list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list);
+ ept->ep.maxpacket = 512;
+ }
+ for (n = 17; n < 32; n++) {
+ struct msm_endpoint *ept = ui->ept + n;
+ list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list);
+ ept->ep.maxpacket = 512;
+ }
+
+ retval = device_add(&ui->gadget.dev);
+ if (retval)
+ goto fail;
+
+ retval = bind(&ui->gadget);
+ if (retval) {
+ dev_err(&ui->pdev->dev, "bind to driver %s --> error %d\n",
+ driver->driver.name, retval);
+ device_del(&ui->gadget.dev);
+ goto fail;
+ }
+
+ retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup);
+ if (retval != 0)
+ dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+ "(wakeup) error: (%d)\n", retval);
+ retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_state);
+ if (retval != 0)
+ dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+ " (usb_state) error: (%d)\n", retval);
+
+ retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_speed);
+ if (retval != 0)
+ dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+ " (usb_speed) error: (%d)\n", retval);
+
+ retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_type);
+ if (retval != 0)
+ dev_err(&ui->pdev->dev,
+ "failed to create sysfs entry(chg_type): err:(%d)\n",
+ retval);
+ retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_current);
+ if (retval != 0)
+ dev_err(&ui->pdev->dev,
+ "failed to create sysfs entry(chg_current):"
+ "err:(%d)\n", retval);
+
+ dev_dbg(&ui->pdev->dev, "registered gadget driver '%s'\n",
+ driver->driver.name);
+ usb_start(ui);
+
+ return 0;
+
+fail:
+ ui->driver = NULL;
+ ui->gadget.dev.driver = NULL;
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct usb_info *dev = the_usb_info;
+
+ if (!dev)
+ return -ENODEV;
+ if (!driver || driver != dev->driver || !driver->unbind)
+ return -EINVAL;
+
+ msm72k_pullup_internal(&dev->gadget, 0);
+ if (dev->irq) {
+ free_irq(dev->irq, dev);
+ dev->irq = 0;
+ }
+ dev->state = USB_STATE_IDLE;
+ atomic_set(&dev->configured, 0);
+ switch_set_state(&dev->sdev, 0);
+ /* cancel pending ep0 transactions */
+ flush_endpoint(&dev->ep0out);
+ flush_endpoint(&dev->ep0in);
+
+ device_remove_file(&dev->gadget.dev, &dev_attr_wakeup);
+ device_remove_file(&dev->gadget.dev, &dev_attr_usb_state);
+ device_remove_file(&dev->gadget.dev, &dev_attr_usb_speed);
+ device_remove_file(&dev->gadget.dev, &dev_attr_chg_type);
+ device_remove_file(&dev->gadget.dev, &dev_attr_chg_current);
+ driver->disconnect(&dev->gadget);
+ driver->unbind(&dev->gadget);
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
+
+ device_del(&dev->gadget.dev);
+
+ dev_dbg(&dev->pdev->dev,
+ "unregistered gadget driver '%s'\n", driver->driver.name);
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+static int msm72k_udc_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int msm72k_udc_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static int msm72k_udc_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: idling...\n");
+ return 0;
+}
+
+static struct dev_pm_ops msm72k_udc_dev_pm_ops = {
+ .runtime_suspend = msm72k_udc_runtime_suspend,
+ .runtime_resume = msm72k_udc_runtime_resume,
+ .runtime_idle = msm72k_udc_runtime_idle
+};
+
+static struct platform_driver usb_driver = {
+ .probe = msm72k_probe,
+ .driver = { .name = "msm_hsusb",
+ .pm = &msm72k_udc_dev_pm_ops, },
+};
+
+static int __init init(void)
+{
+ return platform_driver_register(&usb_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+ platform_driver_unregister(&usb_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Mike Lockwood, Brian Swetland");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/qcom_maemo.c b/drivers/usb/gadget/qcom_maemo.c
new file mode 100644
index 0000000..39686c4
--- /dev/null
+++ b/drivers/usb/gadget/qcom_maemo.c
@@ -0,0 +1,304 @@
+/*
+ * Qualcomm Maemo Composite driver
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program from the Code Aurora Forum is free software; you can
+ * redistribute it and/or modify it under the GNU General Public License
+ * version 2 and only version 2 as published by the Free Software Foundation.
+ * The original work available from [git.kernel.org ] is subject to the
+ * notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+
+
+#define DRIVER_DESC "Qcom Maemo Composite Gadget"
+#define VENDOR_ID 0x05c6
+#define PRODUCT_ID 0x902E
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#define USB_ETH
+
+#define USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
+# include "f_rndis.c"
+# include "rndis.c"
+#endif
+
+
+#include "u_serial.c"
+#include "f_serial.c"
+
+#include "u_ether.c"
+
+#undef DBG /* u_ether.c has broken idea about macros */
+#undef VDBG /* so clean up after it */
+#undef ERROR
+#undef INFO
+
+#include "f_mass_storage.c"
+#include "f_diag.c"
+#include "f_rmnet.c"
+
+/*-------------------------------------------------------------------------*/
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_SERIAL_IDX 2
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+ /* These dummy values should be overridden by platform data */
+ [STRING_MANUFACTURER_IDX].s = "Qualcomm Incorporated",
+ [STRING_PRODUCT_IDX].s = "Usb composition",
+ [STRING_SERIAL_IDX].s = "0123456789ABCDEF",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof(device_desc),
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+ .bDeviceSubClass = 0,
+ .bDeviceProtocol = 0,
+ .idVendor = __constant_cpu_to_le16(VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(PRODUCT_ID),
+ .bcdDevice = __constant_cpu_to_le16(0xffff),
+ .bNumConfigurations = 1,
+};
+
+static u8 hostaddr[ETH_ALEN];
+static struct usb_diag_ch *diag_ch;
+static struct usb_diag_platform_data usb_diag_pdata = {
+ .ch_name = DIAG_LEGACY,
+};
+
+/****************************** Configurations ******************************/
+static struct fsg_module_parameters mod_data = {
+ .stall = 0
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static struct fsg_common *fsg_common;
+static int maemo_setup_config(struct usb_configuration *c,
+ const struct usb_ctrlrequest *ctrl);
+
+static int maemo_do_config(struct usb_configuration *c)
+{
+ int ret;
+
+ ret = rndis_bind_config(c, hostaddr);
+ if (ret < 0)
+ return ret;
+
+ ret = diag_function_add(c);
+ if (ret < 0)
+ return ret;
+
+ ret = gser_bind_config(c, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = gser_bind_config(c, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = rmnet_function_add(c);
+ if (ret < 0)
+ return ret;
+
+ ret = fsg_add(c->cdev, c, fsg_common);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct usb_configuration maemo_config_driver = {
+ .label = "Qcom Maemo Gadget",
+ .bind = maemo_do_config,
+ .setup = maemo_setup_config,
+ .bConfigurationValue = 1,
+ .bMaxPower = 0xFA,
+};
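+
+/*
+ * Forward otherwise unhandled setup requests to each bound interface
+ * in turn until one of them accepts the request.
+ */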
+static int maemo_setup_config(struct usb_configuration *c,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int i;
+ int ret = -EOPNOTSUPP;
+
+ for (i = 0; i < maemo_config_driver.next_interface_id; i++) {
+ if (maemo_config_driver.interface[i]->setup) {
+ ret = maemo_config_driver.interface[i]->setup(
+ maemo_config_driver.interface[i], ctrl);
+ if (ret >= 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int maemo_bind(struct usb_composite_dev *cdev)
+{
+ struct usb_gadget *gadget = cdev->gadget;
+ int status, gcnum;
+
+ /* set up diag channel */
+ diag_ch = diag_setup(&usb_diag_pdata);
+ if (IS_ERR(diag_ch))
+ return PTR_ERR(diag_ch);
+
+ /* set up network link layer */
+ status = gether_setup(cdev->gadget, hostaddr);
+ if (status < 0)
+ goto diag_clean;
+
+ /* set up serial link layer */
+ status = gserial_setup(cdev->gadget, 2);
+ if (status < 0)
+ goto fail0;
+
+ /* set up mass storage function */
+ fsg_common = fsg_common_from_params(0, cdev, &mod_data);
+ if (IS_ERR(fsg_common)) {
+ status = PTR_ERR(fsg_common);
+ goto fail1;
+ }
+
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ else {
+ /* gadget zero is so simple (for now, no altsettings) that
+ * it SHOULD NOT have problems with bulk-capable hardware.
+ * so just warn about unrecognized controllers -- don't panic.
+ *
+ * things like configuration and altsetting numbering
+ * can need hardware-specific attention though.
+ */
+ WARNING(cdev, "controller '%s' not recognized\n",
+ gadget->name);
+ device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+ }
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail2;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail2;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
+
+ if (!usb_gadget_set_selfpowered(gadget))
+ maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_SELFPOWER;
+
+ if (gadget->ops->wakeup)
+ maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+
+ /* register our first configuration */
+ status = usb_add_config(cdev, &maemo_config_driver);
+ if (status < 0)
+ goto fail2;
+
+ usb_gadget_set_selfpowered(gadget);
+ dev_info(&gadget->dev, DRIVER_DESC "\n");
+ fsg_common_put(fsg_common);
+ return 0;
+
+fail2:
+ fsg_common_put(fsg_common);
+fail1:
+ gserial_cleanup();
+fail0:
+ gether_cleanup();
+diag_clean:
+ diag_cleanup(diag_ch);
+
+ return status;
+}
+
+static int __exit maemo_unbind(struct usb_composite_dev *cdev)
+{
+ gserial_cleanup();
+ gether_cleanup();
+ diag_cleanup(diag_ch);
+ return 0;
+}
+
+static struct usb_composite_driver qcom_maemo_driver = {
+ .name = "Qcom Maemo Gadget",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = maemo_bind,
+ .unbind = __exit_p(maemo_unbind),
+};
+
+static int __init qcom_maemo_usb_init(void)
+{
+ return usb_composite_register(&qcom_maemo_driver);
+}
+module_init(qcom_maemo_usb_init);
+
+static void __exit qcom_maemo_usb_cleanup(void)
+{
+ usb_composite_unregister(&qcom_maemo_driver);
+}
+module_exit(qcom_maemo_usb_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index a872248..c3ccb72 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -262,8 +262,13 @@
#define EP0_BUFSIZE 256
#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
-/* Number of buffers we will use. 2 is enough for double-buffering */
-#define FSG_NUM_BUFFERS 2
+/* Number of buffers for CBW, DATA and CSW */
+#ifdef CONFIG_USB_CSW_HACK
+#define FSG_NUM_BUFFERS 4
+#else
+#define FSG_NUM_BUFFERS 2
+#endif
+
/* Default size of buffer length. */
#define FSG_BUFLEN ((u32)16384)
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
new file mode 100644
index 0000000..a047cfc
--- /dev/null
+++ b/drivers/usb/gadget/u_bam.c
@@ -0,0 +1,812 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/netdevice.h>
+#include <mach/bam_dmux.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+
+#include "u_rmnet.h"
+
+#define BAM_N_PORTS 1
+
+static struct workqueue_struct *gbam_wq;
+static int n_bam_ports;
+static unsigned bam_ch_ids[] = { 8 };
+
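+/*
+ * Data path tunables, all runtime-adjustable module parameters: the
+ * to-host queue is capped at tx_pkt_drop_thld packets, and traffic
+ * towards the BAM is throttled once pending_with_bam reaches
+ * rx_fctrl_en_thld, resuming when it drops below rx_fctrl_dis_thld.
+ */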
+#define TX_PKT_DROP_THRESHOLD 1000
+#define RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000
+#define RX_PKT_FLOW_CTRL_DISABLE 500
+#define RX_PKT_FLOW_CTRL_SUPPORT 1
+
+#define BAM_MUX_HDR 8
+
+#define RX_Q_SIZE 16
+#define TX_Q_SIZE 200
+#define RX_REQ_SIZE (2048 - BAM_MUX_HDR)
+
+unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
+module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_support = RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
+module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int tx_q_size = TX_Q_SIZE;
+module_param(tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_q_size = RX_Q_SIZE;
+module_param(rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_req_size = RX_REQ_SIZE;
+module_param(rx_req_size, uint, S_IRUGO | S_IWUSR);
+
+struct bam_ch_info {
+ atomic_t opened;
+ unsigned id;
+
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_q;
+
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_q;
+
+ struct gbam_port *port;
+ struct work_struct write_tobam_w;
+
+ /* stats */
+ unsigned int pending_with_bam;
+ unsigned int tohost_drp_cnt;
+ unsigned int tomodem_drp_cnt;
+ unsigned int tx_len;
+ unsigned int rx_len;
+ unsigned long to_modem;
+ unsigned long to_host;
+};
+
+struct gbam_port {
+ unsigned port_num;
+ spinlock_t port_lock;
+
+ struct grmnet *port_usb;
+
+ struct bam_ch_info data_ch;
+
+ struct work_struct connect_w;
+};
+
+static struct bam_portmaster {
+ struct gbam_port *port;
+} bam_ports[BAM_N_PORTS];
+
+static void gbam_start_rx(struct gbam_port *port);
+
+/*---------------misc functions---------------- */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num,
+ void (*cb)(struct usb_ep *ep, struct usb_request *),
+ gfp_t flags)
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
+ ep, head, num, cb);
+
+ for (i = 0; i < num; i++) {
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_debug("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add(&req->list, head);
+ }
+
+ return 0;
+}
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
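+/*
+ * IN path: pair each skb queued by gbam_data_recv_cb() with an idle
+ * usb_request and queue it on the IN endpoint towards the host.  The
+ * port lock is released around usb_ep_queue() so a completion that
+ * runs synchronously can take it again without deadlocking.
+ */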
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+ unsigned long flags;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb;
+ int ret;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ ep = port->port_usb->in;
+
+ while (!list_empty(&d->tx_idle)) {
+ skb = __skb_dequeue(&d->tx_skb_q);
+ if (!skb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ req = list_first_entry(&d->tx_idle,
+ struct usb_request,
+ list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+
+ list_del(&req->list);
+
+ spin_unlock(&port->port_lock);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock);
+ if (ret) {
+ pr_err("%s: usb epIn failed\n", __func__);
+ list_add(&req->list, &d->tx_idle);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ d->to_host++;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
+ port, port->port_num, d, skb->len);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (d->tx_skb_q.qlen > tx_pkt_drop_thld) {
+ d->tohost_drp_cnt++;
+ if (printk_ratelimit())
+ pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
+ __func__, d->tohost_drp_cnt);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ __skb_queue_tail(&d->tx_skb_q, skb);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ gbam_write_data_tohost(port);
+}
+
+void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ dev_kfree_skb_any(skb);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d->pending_with_bam--;
+
+ pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
+ port, d, d->to_modem,
+ d->pending_with_bam, port->port_num);
+
+ if (rx_fctrl_support &&
+ d->pending_with_bam >= rx_fctrl_dis_thld) {
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ gbam_start_rx(port);
+}
+
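+/*
+ * OUT path worker: drain host data out of rx_skb_q and hand each skb
+ * to the BAM DMUX.  pending_with_bam counts writes the DMUX has not
+ * yet acknowledged through gbam_data_write_done().
+ */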
+static void gbam_data_write_tobam(struct work_struct *w)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret;
+
+ d = container_of(w, struct bam_ch_info, write_tobam_w);
+ port = d->port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ while ((skb = __skb_dequeue(&d->rx_skb_q))) {
+ d->pending_with_bam++;
+ d->to_modem++;
+
+ pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
+ port, d, d->to_modem, d->pending_with_bam,
+ port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = msm_bam_dmux_write(d->id, skb);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret) {
+ pr_debug("%s: write error:%d\n", __func__, ret);
+ d->pending_with_bam--;
+ d->to_modem--;
+ d->tomodem_drp_cnt++;
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/*-------------------------------------------------------------*/
+
+static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ break;
+ default:
+ pr_err("%s: data tx ep error %d\n",
+ __func__, status);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ if (!port)
+ return;
+
+ spin_lock(&port->port_lock);
+ d = &port->data_ch;
+ list_add_tail(&req->list, &d->tx_idle);
+ spin_unlock(&port->port_lock);
+
+ gbam_write_data_tohost(port);
+}
+
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ switch (status) {
+ case 0:
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* cable disconnection */
+ dev_kfree_skb_any(skb);
+ req->buf = 0;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ if (printk_ratelimit())
+ pr_err("%s: %s response error %d, %d/%d\n",
+ __func__, ep->name, status,
+ req->actual, req->length);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ spin_lock(&port->port_lock);
+ if (queue) {
+ __skb_queue_tail(&d->rx_skb_q, skb);
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+
+ /* TODO: Handle flow control gracefully by having
+ * a callback mechanism from the bam driver
+ */
+ if (rx_fctrl_support &&
+ d->pending_with_bam >= rx_fctrl_en_thld) {
+
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ return;
+ }
+ spin_unlock(&port->port_lock);
+
+ skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+ if (!skb) {
+ spin_lock(&port->port_lock);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ return;
+ }
+ skb_reserve(skb, BAM_MUX_HDR);
+
+ req->buf = skb->data;
+ req->length = rx_req_size;
+ req->context = skb;
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ dev_kfree_skb_any(skb);
+
+ if (printk_ratelimit())
+ pr_err("%s: data rx enqueue err %d\n",
+ __func__, status);
+
+ spin_lock(&port->port_lock);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ }
+}
+
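+/*
+ * Re-arm the OUT endpoint: attach a fresh skb (with BAM_MUX_HDR bytes
+ * of headroom reserved for the mux header) to every idle request and
+ * queue it so the host can keep sending data.
+ */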
+static void gbam_start_rx(struct gbam_port *port)
+{
+ struct usb_request *req;
+ struct bam_ch_info *d;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ d = &port->data_ch;
+ ep = port->port_usb->out;
+
+ while (port->port_usb && !list_empty(&d->rx_idle)) {
+ req = list_first_entry(&d->rx_idle, struct usb_request, list);
+
+ skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+ if (!skb)
+ break;
+ skb_reserve(skb, BAM_MUX_HDR);
+
+ list_del(&req->list);
+ req->buf = skb->data;
+ req->length = rx_req_size;
+ req->context = skb;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+
+ if (printk_ratelimit())
+ pr_err("%s: rx queue failed\n", __func__);
+
+ if (port->port_usb)
+ list_add(&req->list, &d->rx_idle);
+ else
+ usb_ep_free_request(ep, req);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void gbam_start_io(struct gbam_port *port)
+{
+ unsigned long flags;
+ struct usb_ep *ep;
+ int ret;
+ struct bam_ch_info *d;
+
+ pr_debug("%s: port:%p\n", __func__, port);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ d = &port->data_ch;
+ ep = port->port_usb->out;
+ ret = gbam_alloc_requests(ep, &d->rx_idle, rx_q_size,
+ gbam_epout_complete, GFP_ATOMIC);
+ if (ret) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: rx req allocation failed\n", __func__);
+ return;
+ }
+
+ ep = port->port_usb->in;
+ ret = gbam_alloc_requests(ep, &d->tx_idle, tx_q_size,
+ gbam_epin_complete, GFP_ATOMIC);
+ if (ret) {
+ pr_err("%s: tx req allocation failed\n", __func__);
+ gbam_free_requests(port->port_usb->out, &d->rx_idle);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* queue out requests */
+ gbam_start_rx(port);
+}
+
+static void gbam_connect_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+ struct bam_ch_info *d = &port->data_ch;
+ int ret;
+
+ ret = msm_bam_dmux_open(d->id, port,
+ gbam_data_recv_cb,
+ gbam_data_write_done);
+ if (ret) {
+ pr_err("%s: unable open bam ch:%d err:%d\n",
+ __func__, d->id, ret);
+ return;
+ }
+ atomic_set(&d->opened, 1);
+
+ gbam_start_io(port);
+
+ pr_debug("%s: done\n", __func__);
+}
+
+static void gbam_port_free(int portno)
+{
+ struct gbam_port *port = bam_ports[portno].port;
+
+ kfree(port);
+}
+
+static int gbam_port_alloc(int portno)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+
+ port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+
+ /* port initialization */
+ spin_lock_init(&port->port_lock);
+ INIT_WORK(&port->connect_w, gbam_connect_work);
+
+ /* data ch */
+ d = &port->data_ch;
+ d->port = port;
+ INIT_LIST_HEAD(&d->tx_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+ INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+ skb_queue_head_init(&d->tx_skb_q);
+ skb_queue_head_init(&d->rx_skb_q);
+ d->id = bam_ch_ids[portno];
+
+ bam_ports[portno].port = port;
+
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 1024
+static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ char *buf;
+ unsigned long flags;
+ int ret;
+ int i;
+ int temp = 0;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < n_bam_ports; i++) {
+ port = bam_ports[i].port;
+ if (!port)
+ continue;
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+ "#PORT:%d port:%p data_ch:%p#\n"
+ "dpkts_to_usbhost: %lu\n"
+ "dpkts_to_modem: %lu\n"
+ "dpkts_pwith_bam: %u\n"
+ "to_usbhost_dcnt: %u\n"
+ "tomodem__dcnt: %u\n"
+ "tx_buf_len: %u\n"
+ "data_ch_opened: %d\n",
+ i, port, &port->data_ch,
+ d->to_host, d->to_modem,
+ d->pending_with_bam,
+ d->tohost_drp_cnt, d->tomodem_drp_cnt,
+ d->tx_skb_q.qlen, atomic_read(&d->opened));
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < n_bam_ports; i++) {
+ port = bam_ports[i].port;
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ d->to_host = 0;
+ d->to_modem = 0;
+ d->pending_with_bam = 0;
+ d->tohost_drp_cnt = 0;
+ d->tomodem_drp_cnt = 0;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+ return count;
+}
+
+const struct file_operations gbam_stats_ops = {
+ .read = gbam_read_stats,
+ .write = gbam_reset_stats,
+};
+
+static void gbam_debugfs_init(void)
+{
+ struct dentry *dent;
+ struct dentry *dfile;
+
+ dent = debugfs_create_dir("usb_rmnet", 0);
+ if (IS_ERR(dent))
+ return;
+
+ /* TODO: Implement cleanup function to remove created file */
+ dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
+ if (!dfile || IS_ERR(dfile))
+ debugfs_remove(dent);
+}
+#else
+static void gbam_debugfs_init(void) { }
+#endif
+
+static void gbam_free_buffers(struct gbam_port *port)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct bam_ch_info *d;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb)
+ goto free_buf_out;
+
+ d = &port->data_ch;
+
+ gbam_free_requests(port->port_usb->in, &d->tx_idle);
+ gbam_free_requests(port->port_usb->out, &d->rx_idle);
+
+ while ((skb = __skb_dequeue(&d->tx_skb_q)))
+ dev_kfree_skb_any(skb);
+
+ while ((skb = __skb_dequeue(&d->rx_skb_q)))
+ dev_kfree_skb_any(skb);
+
+free_buf_out:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gbam_disconnect(struct grmnet *gr, u8 port_num)
+{
+ struct gbam_port *port;
+ unsigned long flags;
+ struct bam_ch_info *d;
+
+ pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+ if (port_num >= n_bam_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet port is null\n", __func__);
+ return;
+ }
+
+ port = bam_ports[port_num].port;
+ d = &port->data_ch;
+
+ gbam_free_buffers(port);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* disable endpoints */
+ usb_ep_disable(gr->out);
+ usb_ep_disable(gr->in);
+
+ if (atomic_read(&d->opened))
+ msm_bam_dmux_close(d->id);
+
+ atomic_set(&d->opened, 0);
+}
+
+int gbam_connect(struct grmnet *gr, u8 port_num)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+ if (port_num >= n_bam_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return -ENODEV;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet port is null\n", __func__);
+ return -ENODEV;
+ }
+
+ port = bam_ports[port_num].port;
+ d = &port->data_ch;
+
+ ret = usb_ep_enable(gr->in, gr->in_desc);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+ __func__, gr->in);
+ return ret;
+ }
+ gr->in->driver_data = port;
+
+ ret = usb_ep_enable(gr->out, gr->out_desc);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+ __func__, gr->out);
+ gr->in->driver_data = 0;
+ return ret;
+ }
+ gr->out->driver_data = port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = gr;
+
+ d->to_host = 0;
+ d->to_modem = 0;
+ d->pending_with_bam = 0;
+ d->tohost_drp_cnt = 0;
+ d->tomodem_drp_cnt = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+
+ queue_work(gbam_wq, &port->connect_w);
+
+ return 0;
+}
+
+int gbam_setup(unsigned int count)
+{
+ int i;
+ int ret;
+
+ pr_debug("%s: requested ports:%d\n", __func__, count);
+
+ if (!count || count > BAM_N_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!gbam_wq) {
+ pr_err("%s: Unable to create workqueue gbam_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = gbam_port_alloc(i);
+ if (ret) {
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_bam_ports;
+ }
+ n_bam_ports++;
+ }
+
+ gbam_debugfs_init();
+
+ return 0;
+free_bam_ports:
+ for (i = 0; i < n_bam_ports; i++)
+ gbam_port_free(i);
+
+ destroy_workqueue(gbam_wq);
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
new file mode 100644
index 0000000..aeaddee
--- /dev/null
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct rmnet_ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+struct grmnet {
+ struct usb_function func;
+
+ struct usb_ep *in;
+ struct usb_ep *out;
+ struct usb_endpoint_descriptor *in_desc;
+ struct usb_endpoint_descriptor *out_desc;
+
+ /* to usb host, aka laptop, windows pc etc. Will
+ * be filled by usb driver of rmnet functionality
+ */
+ int (*send_cpkt_response)(struct grmnet *g,
+ struct rmnet_ctrl_pkt *pkt);
+
+ /* to modem, and to be filled by driver implementing
+ * control function
+ */
+ int (*send_cpkt_request)(struct grmnet *g,
+ u8 port_num,
+ struct rmnet_ctrl_pkt *pkt);
+
+ void (*send_cbits_tomodem)(struct grmnet *g,
+ u8 port_num,
+ int cbits);
+};
+
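+/*
+ * Expected usage (a sketch, based on how the BAM data and SMD control
+ * helpers below are wired): the rmnet function driver calls gbam_setup()
+ * and gsmd_ctrl_setup() once at init, then gbam_connect() and
+ * gsmd_ctrl_connect() when the host enables the interface, and the
+ * matching disconnect helpers when it is disabled.
+ */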
+int gbam_setup(unsigned int count);
+int gbam_connect(struct grmnet *, u8 port_num);
+void gbam_disconnect(struct grmnet *, u8 port_num);
+
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num);
+int gsmd_ctrl_setup(unsigned int count);
+
+#endif /* __U_RMNET_H*/
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
new file mode 100644
index 0000000..4449d9e
--- /dev/null
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+
+#include "u_rmnet.h"
+
+#define NR_PORTS 1
+static int n_ports;
+static char *rmnet_ctrl_names[] = { "DATA40_CNTL" };
+static struct workqueue_struct *grmnet_ctrl_wq;
+
+#define SMD_CH_MAX_LEN 20
+#define CH_OPENED 0
+#define CH_READY 1
+struct smd_ch_info {
+ struct smd_channel *ch;
+ char *name;
+ unsigned long flags;
+ wait_queue_head_t wait;
+ unsigned dtr;
+
+ struct list_head tx_q;
+ unsigned long tx_len;
+
+ struct work_struct read_w;
+ struct work_struct write_w;
+
+ struct rmnet_ctrl_port *port;
+
+ int cbits_tomodem;
+ /* stats */
+ unsigned long to_modem;
+ unsigned long to_host;
+};
+
+struct rmnet_ctrl_port {
+ struct smd_ch_info ctrl_ch;
+ unsigned int port_num;
+ struct grmnet *port_usb;
+
+ spinlock_t port_lock;
+ struct work_struct connect_w;
+};
+
+static struct rmnet_ctrl_ports {
+ struct rmnet_ctrl_port *port;
+ struct platform_driver pdrv;
+} ports[NR_PORTS];
+
+
+/*---------------misc functions---------------- */
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void rmnet_ctrl_pkt_free(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+/*--------------------------------------------- */
+
+/*---------------control/smd channel functions---------------- */
+
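+/*
+ * Read worker: pull complete control packets out of the SMD channel
+ * and push them to the USB side through the function driver's
+ * send_cpkt_response() hook.
+ */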
+static void grmnet_ctrl_smd_read_w(struct work_struct *w)
+{
+ struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w);
+ struct rmnet_ctrl_port *port = c->port;
+ int sz;
+ struct rmnet_ctrl_pkt *cpkt;
+ unsigned long flags;
+
+ while (1) {
+ sz = smd_cur_packet_size(c->ch);
+ if (sz == 0)
+ break;
+
+ if (smd_read_avail(c->ch) < sz)
+ break;
+
+ cpkt = rmnet_alloc_ctrl_pkt(sz, GFP_KERNEL);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: unable to allocate rmnet control pkt\n",
+ __func__);
+ return;
+ }
+ cpkt->len = smd_read(c->ch, cpkt->buf, sz);
+
+ /* send it to USB here */
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb && port->port_usb->send_cpkt_response) {
+ port->port_usb->send_cpkt_response(
+ port->port_usb,
+ cpkt);
+ c->to_host++;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+}
+
+static void grmnet_ctrl_smd_write_w(struct work_struct *w)
+{
+ struct smd_ch_info *c = container_of(w, struct smd_ch_info, write_w);
+ struct rmnet_ctrl_port *port = c->port;
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+ int ret;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ while (1) {
+ if (list_empty(&c->tx_q))
+ break;
+
+ cpkt = list_first_entry(&c->tx_q, struct rmnet_ctrl_pkt, list);
+
+ if (smd_write_avail(c->ch) < cpkt->len)
+ break;
+
+ list_del(&cpkt->list);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = smd_write(c->ch, cpkt->buf, cpkt->len);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret != cpkt->len) {
+ pr_err("%s: smd_write failed err:%d\n",
+ __func__, ret);
+ rmnet_ctrl_pkt_free(cpkt);
+ break;
+ }
+ rmnet_ctrl_pkt_free(cpkt);
+ c->to_modem++;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int
+grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno,
+ struct rmnet_ctrl_pkt *cpkt)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+
+ if (portno >= n_ports) {
+ pr_err("%s: Invalid portno#%d\n", __func__, portno);
+ return -ENODEV;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet is null\n", __func__);
+ return -ENODEV;
+ }
+
+ port = ports[portno].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ c = &port->ctrl_ch;
+
+ /* drop cpkt if ch is not open */
+ if (!test_bit(CH_OPENED, &c->flags)) {
+ rmnet_ctrl_pkt_free(cpkt);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return 0;
+ }
+
+ list_add_tail(&cpkt->list, &c->tx_q);
+ queue_work(grmnet_ctrl_wq, &c->write_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return 0;
+}
+
+#define ACM_CTRL_DTR 0x01
+static void
+gsmd_ctrl_send_cbits_tomodem(struct grmnet *gr, u8 portno, int cbits)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ int set_bits = 0;
+ int clear_bits = 0;
+ int temp = 0;
+
+ if (portno >= n_ports) {
+ pr_err("%s: Invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet is null\n", __func__);
+ return;
+ }
+
+ port = ports[portno].port;
+ cbits = cbits & ACM_CTRL_DTR;
+ c = &port->ctrl_ch;
+
+ /* the host will normally only toggle DTR, but keep a generic
+ * set/clear implementation using two separate masks
+ */
+ if (cbits & ACM_CTRL_DTR)
+ set_bits |= TIOCM_DTR;
+ else
+ clear_bits |= TIOCM_DTR;
+
+ temp |= set_bits;
+ temp &= ~clear_bits;
+
+ if (temp == c->cbits_tomodem)
+ return;
+
+ c->cbits_tomodem = temp;
+
+ if (!test_bit(CH_OPENED, &c->flags))
+ return;
+
+ pr_debug("%s: ctrl_tomodem:%d ctrl_bits:%d setbits:%d clearbits:%d\n",
+ __func__, temp, cbits, set_bits, clear_bits);
+
+ smd_tiocmset(c->ch, set_bits, clear_bits);
+}
+
+static char *get_smd_event(unsigned event)
+{
+ switch (event) {
+ case SMD_EVENT_DATA:
+ return "DATA";
+ case SMD_EVENT_OPEN:
+ return "OPEN";
+ case SMD_EVENT_CLOSE:
+ return "CLOSE";
+ }
+
+ return "UNDEFINED";
+}
+
+static void grmnet_ctrl_smd_notify(void *p, unsigned event)
+{
+ struct rmnet_ctrl_port *port = p;
+ struct smd_ch_info *c = &port->ctrl_ch;
+
+ pr_debug("%s: EVENT_(%s)\n", __func__, get_smd_event(event));
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ if (smd_read_avail(c->ch))
+ queue_work(grmnet_ctrl_wq, &c->read_w);
+ if (smd_write_avail(c->ch))
+ queue_work(grmnet_ctrl_wq, &c->write_w);
+ break;
+ case SMD_EVENT_OPEN:
+ set_bit(CH_OPENED, &c->flags);
+ wake_up(&c->wait);
+ break;
+ case SMD_EVENT_CLOSE:
+ clear_bit(CH_OPENED, &c->flags);
+ break;
+ }
+}
+/*------------------------------------------------------------ */
+
+static void grmnet_ctrl_smd_connect_w(struct work_struct *w)
+{
+ struct rmnet_ctrl_port *port =
+ container_of(w, struct rmnet_ctrl_port, connect_w);
+ struct smd_ch_info *c = &port->ctrl_ch;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s:\n", __func__);
+
+ if (!test_bit(CH_READY, &c->flags))
+ return;
+
+ ret = smd_open(c->name, &c->ch, port, grmnet_ctrl_smd_notify);
+ if (ret) {
+ pr_err("%s: Unable to open smd ch:%s err:%d\n",
+ __func__, c->name, ret);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb)
+ smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ unsigned long flags;
+
+ pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+ if (port_num >= n_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return -ENODEV;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet port is null\n", __func__);
+ return -ENODEV;
+ }
+
+ port = ports[port_num].port;
+ c = &port->ctrl_ch;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = gr;
+ gr->send_cpkt_request = grmnet_ctrl_smd_send_cpkt_tomodem;
+ gr->send_cbits_tomodem = gsmd_ctrl_send_cbits_tomodem;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ queue_work(grmnet_ctrl_wq, &port->connect_w);
+
+ return 0;
+}
+
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num)
+{
+ struct rmnet_ctrl_port *port;
+ unsigned long flags;
+ struct smd_ch_info *c;
+
+ pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+ if (port_num >= n_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet port is null\n", __func__);
+ return;
+ }
+
+ port = ports[port_num].port;
+ c = &port->ctrl_ch;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = 0;
+ gr->send_cpkt_request = 0;
+ gr->send_cbits_tomodem = 0;
+ c->cbits_tomodem = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (test_bit(CH_OPENED, &c->flags)) {
+ /* closing the channel should de-assert DTR towards the modem */
+ smd_close(c->ch);
+ clear_bit(CH_OPENED, &c->flags);
+ }
+}
+
+static int grmnet_ctrl_smd_ch_probe(struct platform_device *pdev)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ int i;
+ unsigned long flags;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ for (i = 0; i < n_ports; i++) {
+ port = ports[i].port;
+ c = &port->ctrl_ch;
+
+ if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) {
+ set_bit(CH_READY, &c->flags);
+
+ /* if usb is online, try opening smd_ch */
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb)
+ queue_work(grmnet_ctrl_wq, &port->connect_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int grmnet_ctrl_smd_ch_remove(struct platform_device *pdev)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ int i;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ for (i = 0; i < n_ports; i++) {
+ port = ports[i].port;
+ c = &port->ctrl_ch;
+
+ if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) {
+ clear_bit(CH_READY, &c->flags);
+ clear_bit(CH_OPENED, &c->flags);
+ smd_close(c->ch);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+static void grmnet_ctrl_smd_port_free(int portno)
+{
+ struct rmnet_ctrl_port *port = ports[portno].port;
+
+ kfree(port);
+}
+
+static int grmnet_ctrl_smd_port_alloc(int portno)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ struct platform_driver *pdrv;
+
+ port = kzalloc(sizeof(struct rmnet_ctrl_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+
+ spin_lock_init(&port->port_lock);
+ INIT_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w);
+
+ c = &port->ctrl_ch;
+ c->name = rmnet_ctrl_names[portno];
+ c->port = port;
+ init_waitqueue_head(&c->wait);
+ INIT_LIST_HEAD(&c->tx_q);
+ INIT_WORK(&c->read_w, grmnet_ctrl_smd_read_w);
+ INIT_WORK(&c->write_w, grmnet_ctrl_smd_write_w);
+
+ ports[portno].port = port;
+
+ pdrv = &ports[portno].pdrv;
+ pdrv->probe = grmnet_ctrl_smd_ch_probe;
+ pdrv->remove = grmnet_ctrl_smd_ch_remove;
+ pdrv->driver.name = c->name;
+ pdrv->driver.owner = THIS_MODULE;
+
+ platform_driver_register(pdrv);
+
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+int gsmd_ctrl_setup(unsigned int count)
+{
+ int i;
+ int ret;
+
+ pr_debug("%s: requested ports:%d\n", __func__, count);
+
+ if (!count || count > NR_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!grmnet_ctrl_wq) {
+ pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = grmnet_ctrl_smd_port_alloc(i);
+ if (ret) {
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_ports;
+ }
+ n_ports++;
+ }
+
+ return 0;
+
+free_ports:
+ for (i = 0; i < n_ports; i++)
+ grmnet_ctrl_smd_port_free(i);
+
+ destroy_workqueue(grmnet_ctrl_wq);
+
+ return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 1024
+static ssize_t gsmd_ctrl_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ char *buf;
+ unsigned long flags;
+ int ret;
+ int i;
+ int temp = 0;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < n_ports; i++) {
+ port = ports[i].port;
+ if (!port)
+ continue;
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ c = &port->ctrl_ch;
+
+ temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+ "#PORT:%d port:%p ctrl_ch:%p#\n"
+ "to_usbhost: %lu\n"
+ "to_modem: %lu\n"
+ "DTR: %s\n"
+ "ch_open: %d\n"
+ "ch_ready: %d\n"
+ "read_avail: %d\n"
+ "write_avail:%d\n",
+ i, port, &port->ctrl_ch,
+ c->to_host, c->to_modem,
+ c->cbits_tomodem ? "HIGH" : "LOW",
+ test_bit(CH_OPENED, &c->flags),
+ test_bit(CH_READY, &c->flags),
+ smd_read_avail(c->ch),
+ smd_write_avail(c->ch));
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t gsmd_ctrl_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rmnet_ctrl_port *port;
+ struct smd_ch_info *c;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < n_ports; i++) {
+ port = ports[i].port;
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ c = &port->ctrl_ch;
+
+ c->to_host = 0;
+ c->to_modem = 0;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+ return count;
+}
+
+const struct file_operations gsmd_ctrl_stats_ops = {
+ .read = gsmd_ctrl_read_stats,
+ .write = gsmd_ctrl_reset_stats,
+};
+
+struct dentry *smd_ctrl_dent;
+struct dentry *smd_ctrl_dfile;
+static void gsmd_ctrl_debugfs_init(void)
+{
+ smd_ctrl_dent = debugfs_create_dir("usb_rmnet_ctrl_smd", 0);
+ if (IS_ERR(smd_ctrl_dent))
+ return;
+
+ smd_ctrl_dfile = debugfs_create_file("status", 0444, smd_ctrl_dent, 0,
+ &gsmd_ctrl_stats_ops);
+ if (!smd_ctrl_dfile || IS_ERR(smd_ctrl_dfile))
+ debugfs_remove(smd_ctrl_dent);
+}
+
+static void gsmd_ctrl_debugfs_exit(void)
+{
+ debugfs_remove(smd_ctrl_dfile);
+ debugfs_remove(smd_ctrl_dent);
+}
+
+#else
+static void gsmd_ctrl_debugfs_init(void) { }
+static void gsmd_ctrl_debugfs_exit(void) { }
+#endif
+
+static int __init gsmd_ctrl_init(void)
+{
+ gsmd_ctrl_debugfs_init();
+
+ return 0;
+}
+module_init(gsmd_ctrl_init);
+
+static void __exit gsmd_ctrl_exit(void)
+{
+ gsmd_ctrl_debugfs_exit();
+}
+module_exit(gsmd_ctrl_exit);
+MODULE_DESCRIPTION("smd control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
new file mode 100644
index 0000000..09d898f
--- /dev/null
+++ b/drivers/usb/gadget/u_sdio.c
@@ -0,0 +1,1097 @@
+/*
+ * u_sdio.c - utilities for USB gadget serial over sdio
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program from the Code Aurora Forum is free software; you can
+ * redistribute it and/or modify it under the GNU General Public License
+ * version 2 and only version 2 as published by the Free Software Foundation.
+ * The original work available from [kernel.org] is subject to the notice below.
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/sdio_al.h>
+#include <mach/sdio_cmux.h>
+#include "u_serial.h"
+
+#define SDIO_RX_QUEUE_SIZE 8
+#define SDIO_RX_BUF_SIZE 2048
+
+#define SDIO_TX_QUEUE_SIZE 8
+#define SDIO_TX_BUF_SIZE 2048
+
+/* 1 - DUN, 2 - NMEA/GPS */
+#define SDIO_N_PORTS 2
+static struct sdio_portmaster {
+ struct mutex lock;
+ struct gsdio_port *port;
+ struct platform_driver gsdio_ch;
+} sdio_ports[SDIO_N_PORTS];
+static unsigned n_sdio_ports;
+
+struct sdio_port_info {
+ /* data channel info */
+ char *data_ch_name;
+ struct sdio_channel *ch;
+
+ /* control channel info */
+ int ctrl_ch_id;
+};
+
+struct sdio_port_info sport_info[SDIO_N_PORTS] = {
+ {
+ .data_ch_name = "SDIO_DUN",
+ .ctrl_ch_id = 9,
+ },
+ {
+ .data_ch_name = "SDIO_NMEA",
+ .ctrl_ch_id = 10,
+ },
+};
+
+static struct workqueue_struct *gsdio_wq;
+
+struct gsdio_port {
+ unsigned port_num;
+ spinlock_t port_lock;
+
+ unsigned n_read;
+ struct list_head read_pool;
+ struct list_head read_queue;
+ struct work_struct push;
+ unsigned long rp_len;
+ unsigned long rq_len;
+
+ struct list_head write_pool;
+ struct work_struct pull;
+ unsigned long wp_len;
+
+ struct work_struct notify_modem;
+
+ struct gserial *port_usb;
+ struct usb_cdc_line_coding line_coding;
+
+ int sdio_open;
+ int ctrl_ch_err;
+ struct sdio_port_info *sport_info;
+ struct delayed_work sdio_open_work;
+
+#define SDIO_ACM_CTRL_RI (1 << 3)
+#define SDIO_ACM_CTRL_DSR (1 << 1)
+#define SDIO_ACM_CTRL_DCD (1 << 0)
+ int cbits_to_laptop;
+
+#define SDIO_ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define SDIO_ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+ int cbits_to_modem;
+
+ /* pkt logging */
+ unsigned long nbytes_tolaptop;
+ unsigned long nbytes_tomodem;
+};
+
+void gsdio_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+struct usb_request *
+gsdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_err("%s: usb alloc request failed\n", __func__);
+ return NULL;
+ }
+
+ req->length = len;
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ pr_err("%s: request buf allocation failed\n", __func__);
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+void gsdio_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ gsdio_free_req(ep, req);
+ }
+}
+
+int gsdio_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num, int size,
+ void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
+ ep, head, num, size, cb);
+
+ for (i = 0; i < num; i++) {
+ req = gsdio_alloc_req(ep, size, GFP_ATOMIC);
+ if (!req) {
+ pr_debug("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add(&req->list, head);
+ }
+
+ return 0;
+}
+
+void gsdio_start_rx(struct gsdio_port *port)
+{
+ struct list_head *pool;
+ struct usb_ep *out;
+ int ret;
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+ spin_lock_irq(&port->port_lock);
+
+ if (!port->port_usb) {
+ pr_debug("%s: usb is disconnected\n", __func__);
+ goto start_rx_end;
+ }
+
+ pool = &port->read_pool;
+ out = port->port_usb->out;
+
+ while (!list_empty(pool)) {
+ struct usb_request *req;
+
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del(&req->list);
+ req->length = SDIO_RX_BUF_SIZE;
+ port->rp_len--;
+
+ spin_unlock_irq(&port->port_lock);
+ ret = usb_ep_queue(out, req, GFP_ATOMIC);
+ spin_lock_irq(&port->port_lock);
+ if (ret) {
+ pr_err("%s: usb ep out queue failed"
+ "port:%p, port#%d\n",
+ __func__, port, port->port_num);
+ list_add_tail(&req->list, pool);
+ port->rp_len++;
+ break;
+ }
+
+ /* usb could have disconnected while we released spin lock */
+ if (!port->port_usb) {
+ pr_debug("%s: usb is disconnected\n", __func__);
+ goto start_rx_end;
+ }
+ }
+
+start_rx_end:
+ spin_unlock_irq(&port->port_lock);
+}
+
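+/*
+ * Push the payload of a completed OUT request into the SDIO data
+ * channel.  port->n_read tracks how much of the request has already
+ * been consumed, so a partial write can be resumed once the channel
+ * has room again.
+ */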
+int gsdio_write(struct gsdio_port *port, struct usb_request *req)
+{
+ unsigned avail;
+ char *packet = req->buf;
+ unsigned size = req->actual;
+ unsigned n;
+ int ret = 0;
+
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!req) {
+ pr_err("%s: usb request is null port#%d\n",
+ __func__, port->port_num);
+ return -ENODEV;
+ }
+
+ pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n",
+ __func__, port, port->port_num, req,
+ req->actual, port->n_read);
+
+ if (!port->sdio_open) {
+ pr_debug("%s: SDIO IO is not supported\n", __func__);
+ return -ENODEV;
+ }
+
+ avail = sdio_write_avail(port->sport_info->ch);
+
+ pr_debug("%s: sdio_write_avail:%d", __func__, avail);
+
+ if (!avail)
+ return -EBUSY;
+
+ if (!req->actual) {
+ pr_debug("%s: req->actual is already zero,update bytes read\n",
+ __func__);
+ port->n_read = 0;
+ return -ENODEV;
+ }
+
+ packet = req->buf;
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ if (size > avail)
+ size = avail;
+
+ spin_unlock_irq(&port->port_lock);
+ ret = sdio_write(port->sport_info->ch, packet, size);
+ spin_lock_irq(&port->port_lock);
+ if (ret) {
+ pr_err("%s: port#%d sdio write failed err:%d",
+ __func__, port->port_num, ret);
+ /* try again later */
+ return ret;
+ }
+
+ port->nbytes_tomodem += size;
+
+ if (size + n == req->actual)
+ port->n_read = 0;
+ else
+ port->n_read += size;
+
+ return ret;
+}
+
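+/*
+ * Host-to-modem worker: walk read_queue (OUT requests completed by the
+ * controller), write their contents to SDIO via gsdio_write(), recycle
+ * finished requests into read_pool and finally re-arm the OUT endpoint
+ * with gsdio_start_rx().
+ */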
+void gsdio_rx_push(struct work_struct *w)
+{
+ struct gsdio_port *port = container_of(w, struct gsdio_port, push);
+ struct list_head *q = &port->read_queue;
+ struct usb_ep *out;
+ int ret;
+
+ pr_debug("%s: port:%p port#%d read_queue:%p", __func__,
+ port, port->port_num, q);
+
+ spin_lock_irq(&port->port_lock);
+
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconencted\n", __func__);
+ spin_unlock_irq(&port->port_lock);
+ return;
+ }
+
+ out = port->port_usb->out;
+
+ while (!list_empty(q)) {
+ struct usb_request *req;
+
+ req = list_first_entry(q, struct usb_request, list);
+
+ switch (req->status) {
+ case -ESHUTDOWN:
+ pr_debug("%s: req status shutdown portno#%d port:%p",
+ __func__, port->port_num, port);
+ goto rx_push_end;
+ default:
+ pr_warning("%s: port:%p port#%d"
+ " Unexpected Rx Status:%d\n", __func__,
+ port, port->port_num, req->status);
+ /* FALL THROUGH */
+ case 0:
+ /* normal completion */
+ break;
+ }
+
+ if (!port->sdio_open) {
+ pr_err("%s: sio channel is not open\n", __func__);
+ list_move(&req->list, &port->read_pool);
+ port->rp_len++;
+ port->rq_len--;
+ goto rx_push_end;
+ }
+
+
+ list_del(&req->list);
+ port->rq_len--;
+
+ ret = gsdio_write(port, req);
+ /* as gsdio_write drops spin_lock while writing data
+ * to sdio usb cable may have been disconnected
+ */
+ if (!port->port_usb) {
+ port->n_read = 0;
+ gsdio_free_req(out, req);
+ spin_unlock_irq(&port->port_lock);
+ return;
+ }
+
+ if (ret || port->n_read) {
+ list_add(&req->list, &port->read_queue);
+ port->rq_len++;
+ goto rx_push_end;
+ }
+
+ list_add(&req->list, &port->read_pool);
+ port->rp_len++;
+ }
+
+ if (port->sdio_open && !list_empty(q)) {
+ if (sdio_write_avail(port->sport_info->ch))
+ queue_work(gsdio_wq, &port->push);
+ }
+rx_push_end:
+ spin_unlock_irq(&port->port_lock);
+
+ /* start queuing out requests again to host */
+ gsdio_start_rx(port);
+}
+
+void gsdio_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gsdio_port *port = ep->driver_data;
+ unsigned long flags;
+
+ pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ list_add_tail(&req->list, &port->read_queue);
+ port->rq_len++;
+ queue_work(gsdio_wq, &port->push);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return;
+}
+
+void gsdio_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gsdio_port *port = ep->driver_data;
+ unsigned long flags;
+
+ pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ list_add(&req->list, &port->write_pool);
+ port->wp_len++;
+
+ switch (req->status) {
+ default:
+ pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
+ __func__, port, port->port_num,
+ ep->name, req->status);
+ /* FALL THROUGH */
+ case 0:
+ queue_work(gsdio_wq, &port->pull);
+ break;
+
+ case -ESHUTDOWN:
+ /* disconnect */
+ pr_debug("%s: %s shutdown\n", __func__, ep->name);
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return;
+}
+
+void gsdio_read_pending(struct gsdio_port *port)
+{
+ struct sdio_channel *ch;
+ char buf[1024];
+ int avail;
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ ch = port->sport_info->ch;
+
+ if (!ch)
+ return;
+
+ while ((avail = sdio_read_avail(ch))) {
+ if (avail > 1024)
+ avail = 1024;
+ sdio_read(ch, buf, avail);
+
+ pr_debug("%s: flushed out %d bytes\n", __func__, avail);
+ }
+}
+
+void gsdio_tx_pull(struct work_struct *w)
+{
+ struct gsdio_port *port = container_of(w, struct gsdio_port, pull);
+ struct list_head *pool = &port->write_pool;
+
+ pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
+ port, port->port_num, pool);
+
+ if (!port->port_usb) {
+ pr_err("%s: usb disconnected\n", __func__);
+
+ /* take out all the pending data from sdio */
+ gsdio_read_pending(port);
+
+ return;
+ }
+
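+ /* read data from the SDIO channel and queue it on the
+ * USB IN endpoint towards the host
+ */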
+ spin_lock_irq(&port->port_lock);
+
+ while (!list_empty(pool)) {
+ int avail;
+ struct usb_ep *in = port->port_usb->in;
+ struct sdio_channel *ch = port->sport_info->ch;
+ struct usb_request *req;
+ unsigned len = SDIO_TX_BUF_SIZE;
+ int ret;
+
+ req = list_entry(pool->next, struct usb_request, list);
+
+ if (!port->sdio_open) {
+ pr_debug("%s: SDIO channel is not open\n", __func__);
+ goto tx_pull_end;
+ }
+
+ avail = sdio_read_avail(ch);
+ if (!avail) {
+ /* REVISIT: for ZLP */
+ pr_debug("%s: read_avail:%d port:%p port#%d\n",
+ __func__, avail, port, port->port_num);
+ goto tx_pull_end;
+ }
+
+ if (avail > len)
+ avail = len;
+
+ list_del(&req->list);
+ port->wp_len--;
+
+ spin_unlock_irq(&port->port_lock);
+ ret = sdio_read(ch, req->buf, avail);
+ spin_lock_irq(&port->port_lock);
+ if (ret) {
+ pr_err("%s: port:%p port#%d sdio read failed err:%d",
+ __func__, port, port->port_num, ret);
+
+ /* check if usb is still active */
+ if (!port->port_usb) {
+ gsdio_free_req(in, req);
+ } else {
+ list_add(&req->list, pool);
+ port->wp_len++;
+ }
+ goto tx_pull_end;
+ }
+
+ req->length = avail;
+
+ spin_unlock_irq(&port->port_lock);
+ ret = usb_ep_queue(in, req, GFP_KERNEL);
+ spin_lock_irq(&port->port_lock);
+ if (ret) {
+ pr_err("%s: usb ep out queue failed"
+ "port:%p, port#%d err:%d\n",
+ __func__, port, port->port_num, ret);
+
+ /* could be usb disconnected */
+ if (!port->port_usb) {
+ gsdio_free_req(in, req);
+ } else {
+ list_add(&req->list, pool);
+ port->wp_len++;
+ }
+ goto tx_pull_end;
+ }
+
+ port->nbytes_tolaptop += avail;
+ }
+tx_pull_end:
+ spin_unlock_irq(&port->port_lock);
+}
+
+int gsdio_start_io(struct gsdio_port *port)
+{
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s:\n", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -ENODEV;
+ }
+
+ /* start usb out queue */
+ ret = gsdio_alloc_requests(port->port_usb->out,
+ &port->read_pool,
+ SDIO_RX_QUEUE_SIZE, SDIO_RX_BUF_SIZE,
+ gsdio_read_complete);
+ if (ret) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: unable to allocate out reqs\n", __func__);
+ return ret;
+ }
+ port->rp_len = SDIO_RX_QUEUE_SIZE;
+
+ ret = gsdio_alloc_requests(port->port_usb->in,
+ &port->write_pool,
+ SDIO_TX_QUEUE_SIZE, SDIO_TX_BUF_SIZE,
+ gsdio_write_complete);
+ if (ret) {
+ gsdio_free_requests(port->port_usb->out, &port->read_pool);
+ port->rp_len = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: unable to allocate in reqs\n", __func__);
+ return ret;
+ }
+ port->wp_len = SDIO_TX_QUEUE_SIZE;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ gsdio_start_rx(port);
+ queue_work(gsdio_wq, &port->pull);
+
+ return 0;
+}
+
+void gsdio_port_free(unsigned portno)
+{
+ struct gsdio_port *port = sdio_ports[portno].port;
+ struct platform_driver *pdriver = &sdio_ports[portno].gsdio_ch;
+
+ if (!port) {
+ pr_err("%s: invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ platform_driver_unregister(pdriver);
+
+ kfree(port);
+}
+
+void gsdio_ctrl_wq(struct work_struct *w)
+{
+ struct gsdio_port *port;
+
+ port = container_of(w, struct gsdio_port, notify_modem);
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ if (!port->sdio_open || port->ctrl_ch_err)
+ return;
+
+ sdio_cmux_tiocmset(port->sport_info->ctrl_ch_id,
+ port->cbits_to_modem, ~(port->cbits_to_modem));
+}
+
+void gsdio_ctrl_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits)
+{
+ struct gsdio_port *port;
+ int temp;
+
+ if (portno >= n_sdio_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ if (!gser) {
+ pr_err("%s: gser is null\n", __func__);
+ return;
+ }
+
+ port = sdio_ports[portno].port;
+
+ temp = ctrl_bits & SDIO_ACM_CTRL_DTR ? TIOCM_DTR : 0;
+
+ if (port->cbits_to_modem == temp)
+ return;
+
+ port->cbits_to_modem = temp;
+
+ /* TIOCM_DTR - 0x002 - bit(1) */
+ pr_debug("%s: port:%p port#%d ctrl_bits:%08x\n", __func__,
+ port, port->port_num, ctrl_bits);
+
+ if (!port->sdio_open) {
+ pr_err("%s: port:%p port#%d sdio not connected\n",
+ __func__, port, port->port_num);
+ return;
+ }
+
+ /* whenever DTR is high, let the laptop know the current modem status */
+ if (port->cbits_to_modem && gser->send_modem_ctrl_bits)
+ gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+
+ queue_work(gsdio_wq, &port->notify_modem);
+}
+
+void gsdio_ctrl_modem_status(int ctrl_bits, void *_dev)
+{
+ struct gsdio_port *port = _dev;
+
+ /* TIOCM_CD - 0x040 - bit(6)
+ * TIOCM_RI - 0x080 - bit(7)
+ * TIOCM_DSR- 0x100 - bit(8)
+ */
+ pr_debug("%s: port:%p port#%d event:%08x\n", __func__,
+ port, port->port_num, ctrl_bits);
+
+ port->cbits_to_laptop = 0;
+ ctrl_bits &= TIOCM_RI | TIOCM_CD | TIOCM_DSR;
+ if (ctrl_bits & TIOCM_RI)
+ port->cbits_to_laptop |= SDIO_ACM_CTRL_RI;
+ if (ctrl_bits & TIOCM_CD)
+ port->cbits_to_laptop |= SDIO_ACM_CTRL_DCD;
+ if (ctrl_bits & TIOCM_DSR)
+ port->cbits_to_laptop |= SDIO_ACM_CTRL_DSR;
+
+ if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
+ port->port_usb->send_modem_ctrl_bits(port->port_usb,
+ port->cbits_to_laptop);
+}
+
+void gsdio_ch_notify(void *_dev, unsigned event)
+{
+ struct gsdio_port *port = _dev;
+
+ pr_debug("%s: port:%p port#%d event:%s\n", __func__,
+ port, port->port_num,
+ event == 1 ? "READ_AVAIL" : "WRITE_AVAIL");
+
+ if (event == SDIO_EVENT_DATA_WRITE_AVAIL)
+ queue_work(gsdio_wq, &port->push);
+ if (event == SDIO_EVENT_DATA_READ_AVAIL)
+ queue_work(gsdio_wq, &port->pull);
+}
+
+static void gsdio_open_work(struct work_struct *w)
+{
+ struct gsdio_port *port =
+ container_of(w, struct gsdio_port, sdio_open_work.work);
+ struct sdio_port_info *pi = port->sport_info;
+ struct gserial *gser;
+ int ret;
+ int ctrl_bits;
+ int startio;
+
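+ /* open the SDIO data channel; gsdio_ch_notify() kicks the
+ * rx/tx work items when read/write space becomes available
+ */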
+ ret = sdio_open(pi->data_ch_name, &pi->ch, port, gsdio_ch_notify);
+ if (ret) {
+ pr_err("%s: port:%p port#%d unable to open sdio ch:%s\n",
+ __func__, port, port->port_num,
+ pi->data_ch_name);
+ return;
+ }
+
+ ret = sdio_cmux_open(pi->ctrl_ch_id, 0, 0,
+ gsdio_ctrl_modem_status, port);
+ if (ret) {
+ pr_err("%s: port:%p port#%d unable to open ctrl ch:%d\n",
+ __func__, port, port->port_num, pi->ctrl_ch_id);
+ port->ctrl_ch_err = 1;
+ }
+
+ /* check for latest status update from modem */
+ if (!port->ctrl_ch_err) {
+ ctrl_bits = sdio_cmux_tiocmget(pi->ctrl_ch_id);
+ gsdio_ctrl_modem_status(ctrl_bits, port);
+ }
+
+ pr_debug("%s: SDIO data:%s ctrl:%d are open\n", __func__,
+ pi->data_ch_name,
+ pi->ctrl_ch_id);
+
+ port->sdio_open = 1;
+
+ /* start tx if usb is open already */
+ spin_lock_irq(&port->port_lock);
+ startio = port->port_usb ? 1 : 0;
+ gser = port->port_usb;
+ spin_unlock_irq(&port->port_lock);
+
+ if (startio) {
+ pr_debug("%s: USB is already open, start io\n", __func__);
+ gsdio_start_io(port);
+ if (gser->send_modem_ctrl_bits)
+ gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+ }
+}
+
+#define SDIO_CH_NAME_MAX_LEN 9
+#define SDIO_OPEN_DELAY msecs_to_jiffies(10000)
+static int gsdio_ch_probe(struct platform_device *dev)
+{
+ struct gsdio_port *port;
+ struct sdio_port_info *pi;
+ int i;
+
+ pr_debug("%s: name:%s\n", __func__, dev->name);
+
+ for (i = 0; i < n_sdio_ports; i++) {
+ port = sdio_ports[i].port;
+ pi = port->sport_info;
+
+ pr_debug("%s: sdio_ch_name:%s dev_name:%s\n", __func__,
+ pi->data_ch_name, dev->name);
+
+ /* unfortunately the cmux channel might not be ready even if
+ * the sdio channel is ready. as we don't have a good
+ * notification mechanism, schedule a delayed work
+ */
+ if (!strncmp(pi->data_ch_name, dev->name,
+ SDIO_CH_NAME_MAX_LEN)) {
+ queue_delayed_work(gsdio_wq,
+ &port->sdio_open_work, SDIO_OPEN_DELAY);
+ return 0;
+ }
+ }
+
+ pr_info("%s: name:%s is not found\n", __func__, dev->name);
+
+ return -ENODEV;
+}
+
+int gsdio_port_alloc(unsigned portno,
+ struct usb_cdc_line_coding *coding,
+ struct sdio_port_info *pi)
+{
+ struct gsdio_port *port;
+ struct platform_driver *pdriver;
+
+ port = kzalloc(sizeof(struct gsdio_port), GFP_KERNEL);
+ if (!port) {
+ pr_err("%s: port allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ port->port_num = portno;
+ spin_lock_init(&port->port_lock);
+ port->line_coding = *coding;
+
+ /* READ: read from usb and write into sdio */
+ INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queue);
+ INIT_WORK(&port->push, gsdio_rx_push);
+
+ INIT_LIST_HEAD(&port->write_pool);
+ INIT_WORK(&port->pull, gsdio_tx_pull);
+
+ INIT_WORK(&port->notify_modem, gsdio_ctrl_wq);
+
+ INIT_DELAYED_WORK(&port->sdio_open_work, gsdio_open_work);
+
+ sdio_ports[portno].port = port;
+
+ port->sport_info = pi;
+ pdriver = &sdio_ports[portno].gsdio_ch;
+
+ pdriver->probe = gsdio_ch_probe;
+ pdriver->driver.name = pi->data_ch_name;
+ pdriver->driver.owner = THIS_MODULE;
+
+ pr_debug("%s: port:%p port#%d sdio_name: %s\n", __func__,
+ port, port->port_num, pi->data_ch_name);
+
+ platform_driver_register(pdriver);
+
+ pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+ return 0;
+}
+
+int gsdio_connect(struct gserial *gser, u8 portno)
+{
+ struct gsdio_port *port;
+ int ret = 0;
+ unsigned long flags;
+
+ if (portno >= n_sdio_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, portno);
+ return -EINVAL;
+ }
+
+ if (!gser) {
+ pr_err("%s: gser is null\n", __func__);
+ return -EINVAL;
+ }
+
+ port = sdio_ports[portno].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = gser;
+ gser->notify_modem = gsdio_ctrl_notify_modem;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ ret = usb_ep_enable(gser->in, gser->in_desc);
+ if (ret) {
+ pr_err("%s: failed to enable in ep w/ err:%d\n",
+ __func__, ret);
+ port->port_usb = 0;
+ return ret;
+ }
+ gser->in->driver_data = port;
+
+ ret = usb_ep_enable(gser->out, gser->out_desc);
+ if (ret) {
+ pr_err("%s: failed to enable in ep w/ err:%d\n",
+ __func__, ret);
+ usb_ep_disable(gser->in);
+ port->port_usb = 0;
+ gser->in->driver_data = 0;
+ return ret;
+ }
+ gser->out->driver_data = port;
+
+ if (port->sdio_open) {
+ pr_debug("%s: sdio is already open, start io\n", __func__);
+ gsdio_start_io(port);
+ if (gser->send_modem_ctrl_bits)
+ gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+ }
+
+ return 0;
+}
+
+void gsdio_disconnect(struct gserial *gser, u8 portno)
+{
+ unsigned long flags;
+ struct gsdio_port *port;
+
+ if (portno >= n_sdio_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ if (!gser) {
+ pr_err("%s: gser is null\n", __func__);
+ return;
+ }
+
+ port = sdio_ports[portno].port;
+
+ /* send dtr zero to modem to notify disconnect */
+ port->cbits_to_modem = 0;
+ queue_work(gsdio_wq, &port->notify_modem);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = 0;
+ port->nbytes_tomodem = 0;
+ port->nbytes_tolaptop = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* disable endpoints, aborting down any active I/O */
+ usb_ep_disable(gser->out);
+
+ usb_ep_disable(gser->in);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ gsdio_free_requests(gser->out, &port->read_pool);
+ gsdio_free_requests(gser->out, &port->read_queue);
+ gsdio_free_requests(gser->in, &port->write_pool);
+
+ port->rp_len = 0;
+ port->rq_len = 0;
+ port->wp_len = 0;
+ port->n_read = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_sdio_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gsdio_port *port;
+ char *buf = debug_buffer;
+ unsigned long flags;
+ int i = 0;
+ int temp = 0;
+
+ while (i < n_sdio_ports) {
+ port = sdio_ports[i].port;
+ spin_lock_irqsave(&port->port_lock, flags);
+ temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+ "###PORT:%d port:%p###\n"
+ "nbytes_tolaptop: %lu\n"
+ "nbytes_tomodem: %lu\n"
+ "cbits_to_modem: %u\n"
+ "cbits_to_laptop: %u\n"
+ "read_pool_len: %lu\n"
+ "read_queue_len: %lu\n"
+ "write_pool_len: %lu\n"
+ "n_read: %u\n",
+ i, port,
+ port->nbytes_tolaptop, port->nbytes_tomodem,
+ port->cbits_to_modem, port->cbits_to_laptop,
+ port->rp_len, port->rq_len, port->wp_len,
+ port->n_read);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ i++;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_sdio_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gsdio_port *port;
+ unsigned long flags;
+ int i = 0;
+
+ while (i < n_sdio_ports) {
+ port = sdio_ports[i].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_tolaptop = 0;
+ port->nbytes_tomodem = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ i++;
+ }
+
+ return count;
+}
+
+static int debug_sdio_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations debug_gsdio_ops = {
+ .open = debug_sdio_open,
+ .read = debug_sdio_read_stats,
+ .write = debug_sdio_reset_stats,
+};
+
+static void gsdio_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("usb_gsdio", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("status", 0444, dent, 0, &debug_gsdio_ops);
+}
+#else
+static void gsdio_debugfs_init(void)
+{
+ return;
+}
+#endif
+
+/* connect, disconnect, alloc_requests, free_requests */
+int gsdio_setup(struct usb_gadget *g, unsigned count)
+{
+ struct usb_cdc_line_coding coding;
+ int i;
+ int ret = 0;
+
+ pr_debug("%s: gadget:(%p) count:%d\n", __func__, g, count);
+
+ if (count == 0 || count > SDIO_N_PORTS) {
+ pr_err("%s: invalid number of ports count:%d max_ports:%d\n",
+ __func__, count, SDIO_N_PORTS);
+ return -EINVAL;
+ }
+
+ coding.dwDTERate = cpu_to_le32(9600);
+ coding.bCharFormat = USB_CDC_1_STOP_BITS;
+ coding.bParityType = USB_CDC_NO_PARITY;
+ coding.bDataBits = 8;
+
+ gsdio_wq = create_singlethread_workqueue("k_gserial");
+ if (!gsdio_wq) {
+ pr_err("%s: unable to create workqueue gsdio_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ mutex_init(&sdio_ports[i].lock);
+ ret = gsdio_port_alloc(i, &coding, sport_info + i);
+ if (ret) {
+ pr_err("%s: sdio logical port allocation failed\n",
+ __func__);
+ goto free_sdio_ports;
+ }
+ n_sdio_ports++;
+
+#ifdef DEBUG
+ /* REVISIT: create one file per port
+ * or do not create any file
+ */
+ if (i == 0) {
+ ret = device_create_file(&g->dev, &dev_attr_input);
+ if (ret)
+ pr_err("%s: unable to create device file\n",
+ __func__);
+ }
+#endif
+
+ }
+
+ gsdio_debugfs_init();
+
+ return 0;
+
+free_sdio_ports:
+ for (i = 0; i < n_sdio_ports; i++)
+ gsdio_port_free(i);
+ destroy_workqueue(gsdio_wq);
+
+ return ret;
+}
+
+/* TODO: Add gserial_cleanup */
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 3fdcc9a..7bd9f33 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -25,6 +25,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include "u_serial.h"
@@ -77,9 +78,14 @@
* next layer of buffering. For TX that's a circular buffer; for RX
* consider it a NOP. A third layer is provided by the TTY code.
*/
-#define QUEUE_SIZE 16
+#define TX_QUEUE_SIZE 8
+#define TX_BUF_SIZE 4096
#define WRITE_BUF_SIZE 8192 /* TX only */
+#define RX_QUEUE_SIZE 8
+#define RX_BUF_SIZE 4096
+
+
/* circular buffer */
struct gs_buf {
unsigned buf_size;
@@ -109,7 +115,7 @@
int read_allocated;
struct list_head read_queue;
unsigned n_read;
- struct tasklet_struct push;
+ struct work_struct push;
struct list_head write_pool;
int write_started;
@@ -119,6 +125,10 @@
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
+ unsigned long nbytes_from_host;
+ unsigned long nbytes_to_tty;
+ unsigned long nbytes_from_tty;
+ unsigned long nbytes_to_host;
};
/* increase N_PORTS if you need more */
@@ -129,6 +139,8 @@
} ports[N_PORTS];
static unsigned n_ports;
+static struct workqueue_struct *gserial_wq;
+
#define GS_CLOSE_TIMEOUT 15 /* seconds */
@@ -361,18 +373,37 @@
struct list_head *pool = &port->write_pool;
struct usb_ep *in = port->port_usb->in;
int status = 0;
+ static long prev_len;
bool do_tty_wake = false;
while (!list_empty(pool)) {
struct usb_request *req;
int len;
- if (port->write_started >= QUEUE_SIZE)
+ if (port->write_started >= TX_QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
- len = gs_send_packet(port, req->buf, in->maxpacket);
+ len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
if (len == 0) {
+ /* Queue zero length packet */
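+ /* a ZLP is needed when the previous write was an exact
+ * multiple of maxpacket, so the host sees end-of-transfer
+ */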
+ if (prev_len && (prev_len % in->maxpacket == 0)) {
+ req->length = 0;
+ list_del(&req->list);
+ spin_unlock(&port->port_lock);
+ status = usb_ep_queue(in, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock);
+ if (!port->port_usb) {
+ gs_free_req(in, req);
+ break;
+ }
+ if (status) {
+ printk(KERN_ERR "%s: %s err %d\n",
+ __func__, "queue", status);
+ list_add(&req->list, pool);
+ }
+ prev_len = 0;
+ }
wake_up_interruptible(&port->drain_wait);
break;
}
@@ -396,19 +427,25 @@
spin_unlock(&port->port_lock);
status = usb_ep_queue(in, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
-
+ /*
+ * If port_usb is NULL, gserial disconnect is called
+ * while the spinlock is dropped and all requests are
+ * freed. Free the current request here.
+ */
+ if (!port->port_usb) {
+ do_tty_wake = false;
+ gs_free_req(in, req);
+ break;
+ }
if (status) {
pr_debug("%s: %s %s err %d\n",
__func__, "queue", in->name, status);
list_add(&req->list, pool);
break;
}
+ prev_len = req->length;
+ port->nbytes_from_tty += req->length;
- port->write_started++;
-
- /* abort immediately after disconnect */
- if (!port->port_usb)
- break;
}
if (do_tty_wake && port->port_tty)
@@ -427,6 +464,7 @@
{
struct list_head *pool = &port->read_pool;
struct usb_ep *out = port->port_usb->out;
+ unsigned started = 0;
while (!list_empty(pool)) {
struct usb_request *req;
@@ -438,12 +476,12 @@
if (!tty)
break;
- if (port->read_started >= QUEUE_SIZE)
+ if (port->read_started >= RX_QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
list_del(&req->list);
- req->length = out->maxpacket;
+ req->length = RX_BUF_SIZE;
/* drop lock while we call out; the controller driver
* may need to call us back (e.g. for disconnect)
@@ -451,7 +489,16 @@
spin_unlock(&port->port_lock);
status = usb_ep_queue(out, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
-
+ /*
+ * If port_usb is NULL, gserial disconnect is called
+ * while the spinlock is dropped and all requests are
+ * freed. Free the current request here.
+ */
+ if (!port->port_usb) {
+ started = 0;
+ gs_free_req(out, req);
+ break;
+ }
if (status) {
pr_debug("%s: %s %s err %d\n",
__func__, "queue", out->name, status);
@@ -460,9 +507,6 @@
}
port->read_started++;
- /* abort immediately after disconnect */
- if (!port->port_usb)
- break;
}
return port->read_started;
}
@@ -477,9 +521,9 @@
* So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
* can be buffered before the TTY layer's buffers (currently 64 KB).
*/
-static void gs_rx_push(unsigned long _port)
+static void gs_rx_push(struct work_struct *w)
{
- struct gs_port *port = (void *)_port;
+ struct gs_port *port = container_of(w, struct gs_port, push);
struct tty_struct *tty;
struct list_head *queue = &port->read_queue;
bool disconnect = false;
@@ -532,6 +576,7 @@
}
count = tty_insert_flip_string(tty, packet, size);
+ port->nbytes_to_tty += count;
if (count)
do_push = true;
if (count != size) {
@@ -549,11 +594,17 @@
port->read_started--;
}
- /* Push from tty to ldisc; without low_latency set this is handled by
- * a workqueue, so we won't get callbacks and can hold port_lock
+ /* Push from tty to ldisc; this is immediate with low_latency, and
+ * may trigger callbacks to this driver ... so drop the spinlock.
*/
if (tty && do_push) {
+ spin_unlock_irq(&port->port_lock);
tty_flip_buffer_push(tty);
+ wake_up_interruptible(&tty->read_wait);
+ spin_lock_irq(&port->port_lock);
+
+ /* tty may have been closed */
+ tty = port->port_tty;
}
@@ -562,13 +613,13 @@
* this time around, there may be trouble unless there's an
* implicit tty_unthrottle() call on its way...
*
- * REVISIT we should probably add a timer to keep the tasklet
+ * REVISIT we should probably add a timer to keep the work queue
* from starving ... but it's not clear that case ever happens.
*/
if (!list_empty(queue) && tty) {
if (!test_bit(TTY_THROTTLED, &tty->flags)) {
if (do_push)
- tasklet_schedule(&port->push);
+ queue_work(gserial_wq, &port->push);
else
pr_warning(PREFIX "%d: RX not scheduled?\n",
port->port_num);
@@ -585,19 +636,23 @@
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
+ unsigned long flags;
/* Queue all received data until the tty layer is ready for it. */
- spin_lock(&port->port_lock);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_from_host += req->actual;
list_add_tail(&req->list, &port->read_queue);
- tasklet_schedule(&port->push);
- spin_unlock(&port->port_lock);
+ queue_work(gserial_wq, &port->push);
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
+ unsigned long flags;
- spin_lock(&port->port_lock);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_to_host += req->actual;
list_add(&req->list, &port->write_pool);
port->write_started--;
@@ -609,7 +664,8 @@
/* FALL THROUGH */
case 0:
/* normal completion */
- gs_start_tx(port);
+ if (port->port_usb)
+ gs_start_tx(port);
break;
case -ESHUTDOWN:
@@ -618,7 +674,7 @@
break;
}
- spin_unlock(&port->port_lock);
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
@@ -636,19 +692,18 @@
}
static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
- void (*fn)(struct usb_ep *, struct usb_request *),
+ int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
int *allocated)
{
int i;
struct usb_request *req;
- int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
* do quite that many this time, don't fail ... we just won't
* be as speedy as we might otherwise be.
*/
- for (i = 0; i < n; i++) {
- req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ for (i = 0; i < num; i++) {
+ req = gs_alloc_req(ep, size, GFP_ATOMIC);
if (!req)
return list_empty(head) ? -ENOMEM : 0;
req->complete = fn;
@@ -681,13 +736,13 @@
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
- status = gs_alloc_requests(ep, head, gs_read_complete,
- &port->read_allocated);
+ status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
+ gs_read_complete, &port->read_allocated);
if (status)
return status;
status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
- gs_write_complete, &port->write_allocated);
+ TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete, &port->write_allocated);
if (status) {
gs_free_requests(ep, head, &port->read_allocated);
return status;
@@ -697,6 +752,8 @@
port->n_read = 0;
started = gs_start_rx(port);
+ if (!port->port_usb)
+ return -EIO;
/* unblock any pending writes into our circular buffer */
if (started) {
tty_wakeup(port->port_tty);
@@ -801,6 +858,13 @@
port->open_count = 1;
port->openclose = false;
+ /* low_latency means the ldisc work is carried out in the same
+ * context as tty_flip_buffer_push. With low_latency = 0 it may
+ * be called from IRQ context, but it is better to use a dedicated
+ * worker thread to push the data.
+ */
+ tty->low_latency = 1;
+
/* if connected, start the I/O stream */
if (port->port_usb) {
struct gserial *gser = port->port_usb;
@@ -874,7 +938,7 @@
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
- * let the push tasklet fire again until we're re-opened.
+ * let the push work queue fire again until we're re-opened.
*/
if (gser == NULL)
gs_buf_free(&port->port_write_buf);
@@ -890,6 +954,22 @@
port->port_num, tty, file);
wake_up_interruptible(&port->close_wait);
+
+ /*
+ * Freeing the previously queued requests as they are
+ * allocated again as a part of gs_open()
+ */
+ if (port->port_usb) {
+ spin_unlock_irq(&port->port_lock);
+ usb_ep_fifo_flush(gser->out);
+ usb_ep_fifo_flush(gser->in);
+ spin_lock_irq(&port->port_lock);
+ gs_free_requests(gser->out, &port->read_queue, NULL);
+ gs_free_requests(gser->out, &port->read_pool, NULL);
+ gs_free_requests(gser->in, &port->write_pool, NULL);
+ }
+ port->read_allocated = port->read_started =
+ port->write_allocated = port->write_started = 0;
exit:
spin_unlock_irq(&port->port_lock);
}
@@ -988,7 +1068,7 @@
* rts/cts, or other handshaking with the host, but if the
* read queue backs up enough we'll be NAKing OUT packets.
*/
- tasklet_schedule(&port->push);
+ queue_work(gserial_wq, &port->push);
pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
}
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1012,6 +1092,77 @@
return status;
}
+static int gs_tiocmget(struct tty_struct *tty)
+{
+ struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
+ unsigned int result = 0;
+
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (!gser) {
+ result = -ENODEV;
+ goto fail;
+ }
+
+ if (gser->get_dtr)
+ result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+
+ if (gser->get_rts)
+ result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
+
+ if (gser->serial_state & TIOCM_CD)
+ result |= TIOCM_CD;
+
+ if (gser->serial_state & TIOCM_RI)
+ result |= TIOCM_RI;
+fail:
+ spin_unlock_irq(&port->port_lock);
+ return result;
+}
+
+static int gs_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
+ int status = 0;
+
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (!gser) {
+ status = -ENODEV;
+ goto fail;
+ }
+
+ if (set & TIOCM_RI) {
+ if (gser->send_ring_indicator) {
+ gser->serial_state |= TIOCM_RI;
+ status = gser->send_ring_indicator(gser, 1);
+ }
+ }
+ if (clear & TIOCM_RI) {
+ if (gser->send_ring_indicator) {
+ gser->serial_state &= ~TIOCM_RI;
+ status = gser->send_ring_indicator(gser, 0);
+ }
+ }
+ if (set & TIOCM_CD) {
+ if (gser->send_carrier_detect) {
+ gser->serial_state |= TIOCM_CD;
+ status = gser->send_carrier_detect(gser, 1);
+ }
+ }
+ if (clear & TIOCM_CD) {
+ if (gser->send_carrier_detect) {
+ gser->serial_state &= ~TIOCM_CD;
+ status = gser->send_carrier_detect(gser, 0);
+ }
+ }
+fail:
+ spin_unlock_irq(&port->port_lock);
+ return status;
+}
static const struct tty_operations gs_tty_ops = {
.open = gs_open,
.close = gs_close,
@@ -1022,6 +1173,8 @@
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
.break_ctl = gs_break_ctl,
+ .tiocmget = gs_tiocmget,
+ .tiocmset = gs_tiocmset,
};
/*-------------------------------------------------------------------------*/
@@ -1041,7 +1194,7 @@
init_waitqueue_head(&port->close_wait);
init_waitqueue_head(&port->drain_wait);
- tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+ INIT_WORK(&port->push, gs_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
@@ -1055,6 +1208,116 @@
return 0;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define BUF_SIZE 512
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gs_port *ui_dev = file->private_data;
+ struct tty_struct *tty;
+ struct gserial *gser;
+ char *buf;
+ unsigned long flags;
+ int i = 0;
+ int ret;
+ int result = 0;
+
+ tty = ui_dev->port_tty;
+ gser = ui_dev->port_usb;
+
+ buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ui_dev->port_lock, flags);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
+
+ i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
+ (ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
+
+ i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
+ (ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
+
+ if (tty)
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "tty_flags: %lu\n", tty->flags);
+
+ if (gser && gser->get_dtr) {
+ result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "DTR_status: %d\n", result);
+ }
+
+ spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gs_port *ui_dev = file->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ui_dev->port_lock, flags);
+ ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
+ ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
+ spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+ return count;
+}
+
+static int serial_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_rst_ops = {
+ .open = serial_debug_open,
+ .write = debug_write_reset,
+};
+
+static const struct file_operations debug_adb_ops = {
+ .open = serial_debug_open,
+ .read = debug_read_status,
+};
+
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
+{
+ struct dentry *dent;
+ char buf[48];
+
+ snprintf(buf, 48, "usb_serial%d", port_num);
+ dent = debugfs_create_dir(buf, 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
+ debugfs_create_file("reset", 0222, dent, ui_dev, &debug_rst_ops);
+}
+#else
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num) {}
+#endif
+
/**
* gserial_setup - initialize TTY driver for one or more ports
* @g: gadget to associate with these ports
@@ -1094,7 +1357,8 @@
gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_RESET_TERMIOS;
gs_tty_driver->init_termios = tty_std_termios;
/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
@@ -1113,6 +1377,12 @@
tty_set_operations(gs_tty_driver, &gs_tty_ops);
+ gserial_wq = create_singlethread_workqueue("k_gserial");
+ if (!gserial_wq) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/* make devices be openable */
for (i = 0; i < count; i++) {
mutex_init(&ports[i].lock);
@@ -1127,6 +1397,7 @@
/* export the driver ... */
status = tty_register_driver(gs_tty_driver);
if (status) {
+ put_tty_driver(gs_tty_driver);
pr_err("%s: cannot register, err %d\n",
__func__, status);
goto fail;
@@ -1142,6 +1413,9 @@
__func__, i, PTR_ERR(tty_dev));
}
+ for (i = 0; i < count; i++)
+ usb_debugfs_init(ports[i].port, i);
+
pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
count, (count == 1) ? "" : "s");
@@ -1149,6 +1423,7 @@
fail:
while (count--)
kfree(ports[count].port);
+ destroy_workqueue(gserial_wq);
put_tty_driver(gs_tty_driver);
gs_tty_driver = NULL;
return status;
@@ -1195,7 +1470,7 @@
ports[i].port = NULL;
mutex_unlock(&ports[i].lock);
- tasklet_kill(&port->push);
+ cancel_work_sync(&port->push);
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
@@ -1206,6 +1481,7 @@
}
n_ports = 0;
+ destroy_workqueue(gserial_wq);
tty_unregister_driver(gs_tty_driver);
put_tty_driver(gs_tty_driver);
gs_tty_driver = NULL;
@@ -1344,5 +1620,8 @@
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
+ port->nbytes_from_host = port->nbytes_to_tty =
+ port->nbytes_from_tty = port->nbytes_to_host = 0;
+
spin_unlock_irqrestore(&port->port_lock, flags);
}
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 300f0ed..fea53d8 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -40,11 +40,22 @@
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+ u16 serial_state;
+
+ /* control signal callbacks */
+ unsigned int (*get_dtr)(struct gserial *p);
+ unsigned int (*get_rts)(struct gserial *p);
/* notification callbacks */
void (*connect)(struct gserial *p);
void (*disconnect)(struct gserial *p);
int (*send_break)(struct gserial *p, int duration);
+ unsigned int (*send_carrier_detect)(struct gserial *p, unsigned int);
+ unsigned int (*send_ring_indicator)(struct gserial *p, unsigned int);
+ int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits);
+
+ /* notify the modem of control signal changes */
+ void (*notify_modem)(struct gserial *gser, u8 portno, int ctrl_bits);
};
/* utilities to allocate/free request and buffer */
@@ -59,6 +70,15 @@
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
+/* sdio related functions */
+int gsdio_setup(struct usb_gadget *g, unsigned n_ports);
+int gsdio_connect(struct gserial *, u8 port_num);
+void gsdio_disconnect(struct gserial *, u8 portno);
+
+int gsmd_setup(struct usb_gadget *g, unsigned n_ports);
+int gsmd_connect(struct gserial *, u8 port_num);
+void gsmd_disconnect(struct gserial *, u8 portno);
+
/* functions are bound to configurations by a config or gadget driver */
int acm_bind_config(struct usb_configuration *c, u8 port_num);
int gser_bind_config(struct usb_configuration *c, u8 port_num);
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
new file mode 100644
index 0000000..0e8f247
--- /dev/null
+++ b/drivers/usb/gadget/u_smd.c
@@ -0,0 +1,887 @@
+/*
+ * u_smd.c - utilities for USB gadget serial over smd
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/debugfs.h>
+
+#include "u_serial.h"
+
+#define SMD_RX_QUEUE_SIZE 8
+#define SMD_RX_BUF_SIZE 2048
+
+#define SMD_TX_QUEUE_SIZE 8
+#define SMD_TX_BUF_SIZE 2048
+
+static struct workqueue_struct *gsmd_wq;
+
+#define SMD_N_PORTS 2
+#define CH_OPENED 0
+struct smd_port_info {
+ struct smd_channel *ch;
+ char *name;
+ unsigned long flags;
+ wait_queue_head_t wait;
+};
+
+struct smd_port_info smd_pi[SMD_N_PORTS] = {
+ {
+ .name = "DS",
+ },
+ {
+ .name = "UNUSED",
+ },
+};
+
+struct gsmd_port {
+ unsigned port_num;
+ spinlock_t port_lock;
+
+ unsigned n_read;
+ struct list_head read_pool;
+ struct list_head read_queue;
+ struct work_struct push;
+
+ struct list_head write_pool;
+ struct work_struct pull;
+
+ struct gserial *port_usb;
+
+ struct smd_port_info *pi;
+ struct work_struct connect_work;
+
+ /* At present, smd does not notify
+ * control bit change info from modem
+ */
+ struct work_struct update_modem_ctrl_sig;
+
+#define SMD_ACM_CTRL_DTR 0x01
+#define SMD_ACM_CTRL_RTS 0x02
+ unsigned cbits_to_modem;
+
+#define SMD_ACM_CTRL_DCD 0x01
+#define SMD_ACM_CTRL_DSR 0x02
+#define SMD_ACM_CTRL_BRK 0x04
+#define SMD_ACM_CTRL_RI 0x08
+ unsigned cbits_to_laptop;
+
+ /* pkt counters */
+ unsigned long nbytes_tomodem;
+ unsigned long nbytes_tolaptop;
+};
+
+static struct smd_portmaster {
+ struct mutex lock;
+ struct gsmd_port *port;
+} smd_ports[SMD_N_PORTS];
+static unsigned n_smd_ports;
+
+static void gsmd_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static void gsmd_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ gsmd_free_req(ep, req);
+ }
+}
+
+static struct usb_request *
+gsmd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_err("%s: usb alloc request failed\n", __func__);
+ return 0;
+ }
+
+ req->length = len;
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ pr_err("%s: request buf allocation failed\n", __func__);
+ usb_ep_free_request(ep, req);
+ return 0;
+ }
+
+ return req;
+}
+
+static int gsmd_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num, int size,
+ void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
+ ep, head, num, size, cb);
+
+ for (i = 0; i < num; i++) {
+ req = gsmd_alloc_req(ep, size, GFP_ATOMIC);
+ if (!req) {
+ pr_debug("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add(&req->list, head);
+ }
+
+ return 0;
+}
+
+static void gsmd_start_rx(struct gsmd_port *port)
+{
+ struct list_head *pool;
+ struct usb_ep *out;
+ int ret;
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ spin_lock_irq(&port->port_lock);
+
+ if (!port->port_usb) {
+ pr_debug("%s: USB disconnected\n", __func__);
+ goto start_rx_end;
+ }
+
+ pool = &port->read_pool;
+ out = port->port_usb->out;
+
+ while (!list_empty(pool)) {
+ struct usb_request *req;
+
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del(&req->list);
+ req->length = SMD_RX_BUF_SIZE;
+
+ spin_unlock_irq(&port->port_lock);
+ ret = usb_ep_queue(out, req, GFP_KERNEL);
+ spin_lock_irq(&port->port_lock);
+ if (ret) {
+ pr_err("%s: usb ep out queue failed"
+ "port:%p, port#%d\n",
+ __func__, port, port->port_num);
+ list_add_tail(&req->list, pool);
+ break;
+ }
+ }
+start_rx_end:
+ spin_unlock_irq(&port->port_lock);
+}
+
+static void gsmd_rx_push(struct work_struct *w)
+{
+ struct gsmd_port *port = container_of(w, struct gsmd_port, push);
+ struct list_head *q;
+
+ pr_debug("%s: port:%p port#%d", __func__, port, port->port_num);
+
+ spin_lock_irq(&port->port_lock);
+
+ q = &port->read_queue;
+ while (!list_empty(q)) {
+ struct usb_request *req;
+ int avail;
+ struct smd_port_info *pi = port->pi;
+
+ req = list_first_entry(q, struct usb_request, list);
+
+ switch (req->status) {
+ case -ESHUTDOWN:
+ pr_debug("%s: req status shutdown portno#%d port:%p\n",
+ __func__, port->port_num, port);
+ goto rx_push_end;
+ default:
+ pr_warning("%s: port:%p port#%d"
+ " Unexpected Rx Status:%d\n", __func__,
+ port, port->port_num, req->status);
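+			/* FALL THROUGH */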
+ case 0:
+ /* normal completion */
+ break;
+ }
+
+ avail = smd_write_avail(pi->ch);
+ if (!avail)
+ goto rx_push_end;
+
+ if (req->actual) {
+ char *packet = req->buf;
+ unsigned size = req->actual;
+ unsigned n;
+ int count;
+
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = smd_write(pi->ch, packet, size);
+ if (count < 0) {
+ pr_err("%s: smd write failed err:%d\n",
+ __func__, count);
+ goto rx_push_end;
+ }
+
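+ /* partial write: remember how many bytes were consumed
+ * and retry the remainder when write space is available
+ */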
+ if (count != size) {
+ port->n_read += count;
+ goto rx_push_end;
+ }
+
+ port->nbytes_tomodem += count;
+ }
+
+ port->n_read = 0;
+ list_move(&req->list, &port->read_pool);
+ }
+
+rx_push_end:
+ spin_unlock_irq(&port->port_lock);
+
+ gsmd_start_rx(port);
+}
+
+static void gsmd_read_pending(struct gsmd_port *port)
+{
+ int avail;
+
+ if (!port || !port->pi->ch)
+ return;
+
+ /* passing null buffer discards the data */
+ while ((avail = smd_read_avail(port->pi->ch)))
+ smd_read(port->pi->ch, 0, avail);
+
+ return;
+}
+
+static void gsmd_tx_pull(struct work_struct *w)
+{
+ struct gsmd_port *port = container_of(w, struct gsmd_port, pull);
+ struct list_head *pool = &port->write_pool;
+
+ pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
+ port, port->port_num, pool);
+
+ if (!port->port_usb) {
+ pr_debug("%s: usb is disconnected\n", __func__);
+ gsmd_read_pending(port);
+ return;
+ }
+
+ spin_lock_irq(&port->port_lock);
+ while (!list_empty(pool)) {
+ struct usb_request *req;
+ struct usb_ep *in = port->port_usb->in;
+ struct smd_port_info *pi = port->pi;
+ int avail;
+ int ret;
+
+ avail = smd_read_avail(pi->ch);
+ if (!avail)
+ break;
+
+ avail = avail > SMD_TX_BUF_SIZE ? SMD_TX_BUF_SIZE : avail;
+
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del(&req->list);
+ req->length = smd_read(pi->ch, req->buf, avail);
+
+ spin_unlock_irq(&port->port_lock);
+ ret = usb_ep_queue(in, req, GFP_KERNEL);
+ spin_lock_irq(&port->port_lock);
+ if (ret) {
+ pr_err("%s: usb ep out queue failed"
+ "port:%p, port#%d err:%d\n",
+ __func__, port, port->port_num, ret);
+ /* could be usb disconnected */
+ if (!port->port_usb)
+ gsmd_free_req(in, req);
+ else
+ list_add(&req->list, pool);
+ goto tx_pull_end;
+ }
+
+ port->nbytes_tolaptop += req->length;
+ }
+
+tx_pull_end:
+ /* TBD: Check how code behaves on USB bus suspend */
+ if (port->port_usb && smd_read_avail(port->pi->ch) && !list_empty(pool))
+ queue_work(gsmd_wq, &port->pull);
+
+ spin_unlock_irq(&port->port_lock);
+
+ return;
+}
+
+static void gsmd_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gsmd_port *port = ep->driver_data;
+ unsigned long flags;
+
+ pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ list_add_tail(&req->list, &port->read_queue);
+ queue_work(gsmd_wq, &port->push);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return;
+}
+
+static void gsmd_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gsmd_port *port = ep->driver_data;
+ unsigned long flags;
+
+ pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ list_add(&req->list, &port->write_pool);
+
+ switch (req->status) {
+ default:
+ pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
+ __func__, port, port->port_num,
+ ep->name, req->status);
+ /* FALL THROUGH */
+ case 0:
+ queue_work(gsmd_wq, &port->pull);
+ break;
+
+ case -ESHUTDOWN:
+ /* disconnect */
+ pr_debug("%s: %s shutdown\n", __func__, ep->name);
+ gsmd_free_req(ep, req);
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return;
+}
+
+static void gsmd_start_io(struct gsmd_port *port)
+{
+ int ret = -ENODEV;
+ unsigned long flags;
+
+ pr_debug("%s: port: %p\n", __func__, port);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb)
+ goto start_io_out;
+
+ ret = gsmd_alloc_requests(port->port_usb->out,
+ &port->read_pool,
+ SMD_RX_QUEUE_SIZE, SMD_RX_BUF_SIZE,
+ gsmd_read_complete);
+ if (ret) {
+ pr_err("%s: unable to allocate out requests\n",
+ __func__);
+ goto start_io_out;
+ }
+
+ ret = gsmd_alloc_requests(port->port_usb->in,
+ &port->write_pool,
+ SMD_TX_QUEUE_SIZE, SMD_TX_BUF_SIZE,
+ gsmd_write_complete);
+ if (ret) {
+ gsmd_free_requests(port->port_usb->out, &port->read_pool);
+ pr_err("%s: unable to allocate IN requests\n",
+ __func__);
+ goto start_io_out;
+ }
+
+start_io_out:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (ret)
+ return;
+
+ gsmd_start_rx(port);
+}
+
+static unsigned int convert_uart_sigs_to_acm(unsigned uart_sig)
+{
+ unsigned int acm_sig = 0;
+
+ /* should this be done in the calling functions? */
+ uart_sig &= (TIOCM_RI | TIOCM_CD | TIOCM_DSR);
+
+ if (uart_sig & TIOCM_RI)
+ acm_sig |= SMD_ACM_CTRL_RI;
+ if (uart_sig & TIOCM_CD)
+ acm_sig |= SMD_ACM_CTRL_DCD;
+ if (uart_sig & TIOCM_DSR)
+ acm_sig |= SMD_ACM_CTRL_DSR;
+
+ return acm_sig;
+}
+
+static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig)
+{
+ unsigned int uart_sig = 0;
+
+ /* should this be done in the calling functions? */
+ acm_sig &= (SMD_ACM_CTRL_DTR | SMD_ACM_CTRL_RTS);
+
+ if (acm_sig & SMD_ACM_CTRL_DTR)
+ uart_sig |= TIOCM_DTR;
+ if (acm_sig & SMD_ACM_CTRL_RTS)
+ uart_sig |= TIOCM_RTS;
+
+ return uart_sig;
+}
+
+static void gsmd_notify(void *priv, unsigned event)
+{
+ struct gsmd_port *port = priv;
+ struct smd_port_info *pi = port->pi;
+ int i;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ pr_debug("%s: Event data\n", __func__);
+ if (smd_read_avail(pi->ch))
+ queue_work(gsmd_wq, &port->pull);
+ if (smd_write_avail(pi->ch))
+ queue_work(gsmd_wq, &port->push);
+ break;
+ case SMD_EVENT_OPEN:
+ pr_debug("%s: Event Open\n", __func__);
+ set_bit(CH_OPENED, &pi->flags);
+ wake_up(&pi->wait);
+ break;
+ case SMD_EVENT_CLOSE:
+ pr_debug("%s: Event Close\n", __func__);
+ clear_bit(CH_OPENED, &pi->flags);
+ break;
+ case SMD_EVENT_STATUS:
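+ /* modem control lines changed; forward DCD/DSR/RI
+ * status to the host
+ */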
+ i = smd_tiocmget(port->pi->ch);
+ port->cbits_to_laptop = convert_uart_sigs_to_acm(i);
+ if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
+ port->port_usb->send_modem_ctrl_bits(port->port_usb,
+ port->cbits_to_laptop);
+ break;
+ }
+}
+
+#define MAX_SMD_RETRY_CNT 20
+static void gsmd_connect_work(struct work_struct *w)
+{
+ struct gsmd_port *port;
+ struct smd_port_info *pi;
+ int ret;
+ int retry_cnt = 0;
+
+ port = container_of(w, struct gsmd_port, connect_work);
+ pi = port->pi;
+
+ pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+ /* The SMD driver comes online, gets initialized and loads the modem
+ * about 10 seconds after boot-up. If the USB cable is connected at
+ * boot-up, this might result in an smd open failure. To work around
+ * this, retry opening multiple times.
+ */
+ do {
+ if (!port->port_usb)
+ return;
+
+ ret = smd_named_open_on_edge(pi->name, SMD_APPS_MODEM,
+ &pi->ch, port, gsmd_notify);
+ if (!ret)
+ break;
+
+ retry_cnt++;
+ msleep(1000);
+ } while (retry_cnt < MAX_SMD_RETRY_CNT);
+
+ if (ret) {
+ pr_err("%s: unable to open smd port:%s err:%d\n",
+ __func__, pi->name, ret);
+ return;
+ }
+
+ pr_debug("%s: SMD port open successful retrycnt:%d\n",
+ __func__, retry_cnt);
+
+ wait_event(pi->wait, test_bit(CH_OPENED, &pi->flags));
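+ /* wait until the SMD channel is fully opened */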
+
+ if (!port->port_usb)
+ return;
+
+ /* update usb control signals to modem */
+ if (port->cbits_to_modem)
+ smd_tiocmset(port->pi->ch,
+ port->cbits_to_modem,
+ ~port->cbits_to_modem);
+
+ gsmd_start_io(port);
+}
+
+static void gsmd_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits)
+{
+ struct gsmd_port *port;
+ int temp;
+
+ if (portno >= n_smd_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ if (!gser) {
+ pr_err("%s: gser is null\n", __func__);
+ return;
+ }
+
+ port = smd_ports[portno].port;
+
+ temp = convert_acm_sigs_to_uart(ctrl_bits);
+
+ if (temp == port->cbits_to_modem)
+ return;
+
+ port->cbits_to_modem = temp;
+
+ /* usb could send control signal before smd is ready */
+ if (!test_bit(CH_OPENED, &port->pi->flags))
+ return;
+
+ /* if DTR is high, update latest modem info to laptop */
+ if (port->cbits_to_modem & TIOCM_DTR) {
+ unsigned i;
+
+ i = smd_tiocmget(port->pi->ch);
+ port->cbits_to_laptop = convert_uart_sigs_to_acm(i);
+
+ if (gser->send_modem_ctrl_bits)
+ gser->send_modem_ctrl_bits(
+ port->port_usb,
+ port->cbits_to_laptop);
+ }
+
+ smd_tiocmset(port->pi->ch,
+ port->cbits_to_modem,
+ ~port->cbits_to_modem);
+}
+
+int gsmd_connect(struct gserial *gser, u8 portno)
+{
+ unsigned long flags;
+ int ret;
+ struct gsmd_port *port;
+
+ pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno);
+
+ if (portno >= n_smd_ports) {
+ pr_err("%s: Invalid port no#%d", __func__, portno);
+ return -EINVAL;
+ }
+
+ if (!gser) {
+ pr_err("%s: gser is null\n", __func__);
+ return -EINVAL;
+ }
+
+ port = smd_ports[portno].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = gser;
+ gser->notify_modem = gsmd_notify_modem;
+ port->nbytes_tomodem = 0;
+ port->nbytes_tolaptop = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ ret = usb_ep_enable(gser->in, gser->in_desc);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+ __func__, gser->in);
+ port->port_usb = 0;
+ return ret;
+ }
+ gser->in->driver_data = port;
+
+ ret = usb_ep_enable(gser->out, gser->out_desc);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+ __func__, gser->out);
+ port->port_usb = 0;
+ gser->in->driver_data = 0;
+ return ret;
+ }
+ gser->out->driver_data = port;
+
+ queue_work(gsmd_wq, &port->connect_work);
+
+ return 0;
+}
+
+void gsmd_disconnect(struct gserial *gser, u8 portno)
+{
+ unsigned long flags;
+ struct gsmd_port *port;
+
+ pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno);
+
+ if (portno >= n_smd_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ if (!gser) {
+ pr_err("%s: gser is null\n", __func__);
+ return;
+ }
+
+ port = smd_ports[portno].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* disable endpoints, aborting down any active I/O */
+ usb_ep_disable(gser->out);
+ usb_ep_disable(gser->in);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ gsmd_free_requests(gser->out, &port->read_pool);
+ gsmd_free_requests(gser->out, &port->read_queue);
+ gsmd_free_requests(gser->in, &port->write_pool);
+ port->n_read = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (!test_bit(CH_OPENED, &port->pi->flags))
+ return;
+
+ /* lower the dtr */
+ port->cbits_to_modem = 0;
+ smd_tiocmset(port->pi->ch,
+ port->cbits_to_modem,
+ ~port->cbits_to_modem);
+
+ smd_close(port->pi->ch);
+ port->pi->flags = 0;
+}
+
+static void gsmd_port_free(int portno)
+{
+ struct gsmd_port *port = smd_ports[portno].port;
+
+ if (port)
+ kfree(port);
+}
+
+static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding)
+{
+ struct gsmd_port *port;
+
+ port = kzalloc(sizeof(struct gsmd_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+ port->pi = &smd_pi[portno];
+
+ spin_lock_init(&port->port_lock);
+
+ INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queue);
+ INIT_WORK(&port->push, gsmd_rx_push);
+
+ INIT_LIST_HEAD(&port->write_pool);
+ INIT_WORK(&port->pull, gsmd_tx_pull);
+
+ INIT_WORK(&port->connect_work, gsmd_connect_work);
+ init_waitqueue_head(&port->pi->wait);
+
+ smd_ports[portno].port = port;
+
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gsmd_port *port;
+ char *buf;
+ unsigned long flags;
+ int temp = 0;
+ int i;
+ int ret;
+
+ buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < n_smd_ports; i++) {
+ port = smd_ports[i].port;
+ spin_lock_irqsave(&port->port_lock, flags);
+ temp += scnprintf(buf + temp, 512 - temp,
+ "###PORT:%d###\n"
+ "nbytes_tolaptop: %lu\n"
+ "nbytes_tomodem: %lu\n"
+ "cbits_to_modem: %u\n"
+ "cbits_to_laptop: %u\n"
+ "n_read: %u\n",
+ i, port->nbytes_tolaptop, port->nbytes_tomodem,
+ port->cbits_to_modem, port->cbits_to_laptop,
+ port->n_read);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+ kfree(buf);
+
+ return ret;
+
+}
+
+static ssize_t debug_smd_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gsmd_port *port;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < n_smd_ports; i++) {
+ port = smd_ports[i].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_tolaptop = 0;
+ port->nbytes_tomodem = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ return count;
+}
+
+static int debug_smd_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations debug_gsmd_ops = {
+ .open = debug_smd_open,
+ .read = debug_smd_read_stats,
+ .write = debug_smd_reset_stats,
+};
+
+static void gsmd_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("usb_gsmd", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debugfs_create_file("status", 0444, dent, 0, &debug_gsmd_ops);
+}
+#else
+static void gsmd_debugfs_init(void) {}
+#endif
+
+int gsmd_setup(struct usb_gadget *g, unsigned count)
+{
+ struct usb_cdc_line_coding coding;
+ int ret;
+ int i;
+
+ pr_debug("%s: g:%p count: %d\n", __func__, g, count);
+
+ if (!count || count > SMD_N_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d gadget:%p\n",
+ __func__, count, g);
+ return -EINVAL;
+ }
+
+ coding.dwDTERate = cpu_to_le32(9600);
+ coding.bCharFormat = USB_CDC_1_STOP_BITS;
+ coding.bParityType = USB_CDC_NO_PARITY;
+ coding.bDataBits = 8;
+
+ gsmd_wq = create_singlethread_workqueue("k_gsmd");
+ if (!gsmd_wq) {
+ pr_err("%s: Unable to create workqueue gsmd_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ mutex_init(&smd_ports[i].lock);
+ ret = gsmd_port_alloc(i, &coding);
+ if (ret) {
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_smd_ports;
+ }
+ n_smd_ports++;
+ }
+
+ gsmd_debugfs_init();
+
+ return 0;
+free_smd_ports:
+ for (i = 0; i < n_smd_ports; i++)
+ gsmd_port_free(i);
+
+ destroy_workqueue(gsmd_wq);
+
+ return ret;
+}
+
+void gsmd_cleanup(struct usb_gadget *g, unsigned count)
+{
+ /* TBD */
+}