Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/usb/function/Kconfig b/drivers/usb/function/Kconfig
new file mode 100644
index 0000000..90d776c
--- /dev/null
+++ b/drivers/usb/function/Kconfig
@@ -0,0 +1,163 @@
+menu "USB Function Support"
+	depends on !USB_GADGET
+
+config USB_MSM_OTG
+	bool "OTG support for Qualcomm on-chip USB controller"
+	depends on USB && USB_FUNCTION && USB_EHCI_MSM
+	help
+	  USB OTG driver.
+	  This driver is required if you want to use USB in
+	  Host mode and Device mode.
+
+config USB_FUNCTION
+	boolean "Support for USB Function Drivers"
+	help
+	   The USB Function framework is similar to the Gadget framework
+	   but a little simpler and a little more pluggable.  It trades
+	   some flexibility in the framework for smaller and simpler
+	   function drivers that can be combined into a composite driver.
+
+choice
+	prompt "USB Peripheral Controller"
+	depends on USB_FUNCTION
+	help
+	  A USB device interfaces with the host using a controller.
+	  Many controller drivers are platform-specific; these
+	  often need board-specific hooks.
+
+config USB_FUNCTION_MSM_HSUSB
+	boolean "MSM Highspeed USB Peripheral Controller"
+	depends on ARCH_MSM
+	help
+	  High speed USB device controller for Qualcomm chipsets using
+	  USB Function framework. Controller supports IAD and
+	  32 endpoints(16 IN and 16 OUT).
+
+endchoice
+
+config USB_FUNCTION_NULL
+	boolean "Null Function -- eats packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_ZERO
+	boolean "Zero Function -- generates packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_LOOPBACK
+	boolean "Loopback Function -- returns packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_ADB
+	tristate "ADB Transport Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Function Driver for the Android ADB Protocol
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "adb"
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_UMS
+	boolean "USB Mass Storage Function (userspace)"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_MASS_STORAGE
+	tristate "USB Mass Storage Function (kernel based)"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB && SWITCH
+	help
+	  The File-backed Storage function driver acts as a USB Mass Storage
+	  disk drive.  As its storage repository it can use a regular
+	  file or a block device specified as a module parameter. Initial
+	  driver version is derived from Gadget framework and ported to
+	  Function driver framework.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "file_storage".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_CSW_HACK
+	boolean "USB Mass storage csw hack Feature"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MASS_STORAGE
+	help
+	 This csw hack feature is for increasing the performance of the mass
+	 storage
+
+	default n
+
+config USB_FUNCTION_DIAG
+	tristate "USB MSM Diag Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Simple bridge driver between smd and debug client(host side)
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "diag".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_ETHER
+	tristate "USB Ethernet Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Implements the Ethernet style communication using CDC/ECM.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ether".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_SERIAL
+	tristate "USB Serial Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Implements serial communication using single interface; uses
+	  two endpoints(bulk-in and bulk out) for data transfer and a
+	  interrupt endpoint for control data transfer.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "serial".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_RMNET
+	bool "RmNet function driver"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+	help
+	  Implements Rmnet function.
+	  Rmnet is an alternative to CDC-ECM and Windows RNDIS. It uses
+	  QUALCOMM MSM Interface for control transfers. It acts like a
+	  bridge between Host and modem found in MSM chipsets.
+
+config RMNET_SMD_CTL_CHANNEL
+	string "control SMD channel name"
+	depends on USB_FUNCTION_RMNET
+	default ""
+	help
+	  Control SMD channel for transferring QMI messages
+
+config RMNET_SMD_DATA_CHANNEL
+	string "Data SMD channel name"
+	depends on USB_FUNCTION_RMNET
+	default ""
+	help
+	  Data SMD channel for transferring network data
+
+endmenu
diff --git a/drivers/usb/function/Makefile b/drivers/usb/function/Makefile
new file mode 100644
index 0000000..7614d3b
--- /dev/null
+++ b/drivers/usb/function/Makefile
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_USB_MSM_OTG)		+= msm_otg.o
+obj-$(CONFIG_USB_FUNCTION_MSM_HSUSB)	+= msm_hsusb.o
+obj-$(CONFIG_USB_FUNCTION_NULL)		+= null.o
+obj-$(CONFIG_USB_FUNCTION_ZERO)		+= zero.o
+obj-$(CONFIG_USB_FUNCTION_LOOPBACK)	+= loopback.o
+obj-$(CONFIG_USB_FUNCTION_ADB)		+= adb.o
+obj-$(CONFIG_USB_FUNCTION_UMS)		+= ums.o
+obj-$(CONFIG_USB_FUNCTION_MASS_STORAGE)	+= mass_storage.o
+obj-$(CONFIG_USB_FUNCTION_DIAG)		+= diag.o
+obj-$(CONFIG_USB_FUNCTION_SERIAL)       += serial.o
+obj-$(CONFIG_USB_FUNCTION_ETHER)	+= ether_cdc_ecm.o
+obj-$(CONFIG_USB_FUNCTION_RMNET)	+= rmnet.o
diff --git a/drivers/usb/function/adb.c b/drivers/usb/function/adb.c
new file mode 100644
index 0000000..dd91be3
--- /dev/null
+++ b/drivers/usb/function/adb.c
@@ -0,0 +1,624 @@
+/* drivers/usb/function/adb.c
+ *
+ * Function Device for the Android ADB Protocol
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "usb_function.h"
+
+#if 1
+#define DBG(x...) do {} while (0)
+#else
+#define DBG(x...) printk(x)
+#endif
+
+#define TXN_MAX 4096
+
+/* number of rx and tx requests to allocate */
+#define RX_REQ_MAX 4
+#define TX_REQ_MAX 4
+
+#define ADB_FUNCTION_NAME "adb"
+
+struct adb_context
+{
+	int online;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+	atomic_t enable_excl;
+	spinlock_t lock;
+
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head tx_idle;
+	struct list_head rx_idle;
+	struct list_head rx_done;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	/* the request we're currently reading from */
+	struct usb_request *read_req;
+	unsigned char *read_buf;
+	unsigned read_count;
+	unsigned bound;
+};
+
+static struct adb_context _context;
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength =		sizeof intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	0xff,
+	.bInterfaceSubClass =	0x42,
+	.bInterfaceProtocol =	0x01,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+static struct usb_function usb_func_adb;
+
+static inline int _lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void _unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void req_put(struct adb_context *ctxt, struct list_head *head, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *req_get(struct adb_context *ctxt, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return req;
+}
+
+static void adb_complete_in(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct adb_context *ctxt = req->context;
+
+	if (req->status != 0)
+		ctxt->error = 1;
+
+	req_put(ctxt, &ctxt->tx_idle, req);
+
+	wake_up(&ctxt->write_wq);
+}
+
+static void adb_complete_out(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct adb_context *ctxt = req->context;
+
+	if (req->status != 0) {
+		ctxt->error = 1;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	} else {
+		req_put(ctxt, &ctxt->rx_done, req);
+	}
+
+	wake_up(&ctxt->read_wq);
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+			size_t count, loff_t *pos)
+{
+	struct adb_context *ctxt = &_context;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	DBG("adb_read(%d)\n", count);
+
+	if (_lock(&ctxt->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while (!(ctxt->online || ctxt->error)) {
+		DBG("adb_read: waiting for online state\n");
+		ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
+		if (ret < 0) {
+			_unlock(&ctxt->read_excl);
+			return ret;
+		}
+	}
+
+	while (count > 0) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* if we have idle read requests, get them queued */
+		while ((req = req_get(ctxt, &ctxt->rx_idle))) {
+requeue_req:
+			req->length = TXN_MAX;
+			ret = usb_ept_queue_xfer(ctxt->out, req);
+			if (ret < 0) {
+				DBG("adb_read: failed to queue req %p (%d)\n", req, ret);
+				r = -EIO;
+				ctxt->error = 1;
+				req_put(ctxt, &ctxt->rx_idle, req);
+				goto fail;
+			} else {
+				DBG("rx %p queue\n", req);
+			}
+		}
+
+		/* if we have data pending, give it to userspace */
+		if (ctxt->read_count > 0) {
+			xfer = (ctxt->read_count < count) ? ctxt->read_count : count;
+
+			if (copy_to_user(buf, ctxt->read_buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+			ctxt->read_buf += xfer;
+			ctxt->read_count -= xfer;
+			buf += xfer;
+			count -= xfer;
+
+			/* if we've emptied the buffer, release the request */
+			if (ctxt->read_count == 0) {
+				req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+				ctxt->read_req = 0;
+			}
+			continue;
+		}
+
+		/* wait for a request to complete */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->read_wq,
+					       ((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
+
+		if (req != 0) {
+			/* if we got a 0-len one we need to put it back into
+			** service.  if we made it the current read req we'd
+			** be stuck forever
+			*/
+			if (req->actual == 0)
+				goto requeue_req;
+
+			ctxt->read_req = req;
+			ctxt->read_count = req->actual;
+			ctxt->read_buf = req->buf;
+			DBG("rx %p %d\n", req, req->actual);
+		}
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+	}
+
+fail:
+	_unlock(&ctxt->read_excl);
+	return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+			 size_t count, loff_t *pos)
+{
+	struct adb_context *ctxt = &_context;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	DBG("adb_write(%d)\n", count);
+
+	if (_lock(&ctxt->write_excl))
+		return -EBUSY;
+
+	while (count > 0) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->write_wq,
+					       ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			xfer = count > TXN_MAX ? TXN_MAX : count;
+			if (copy_from_user(req->buf, buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ept_queue_xfer(ctxt->in, req);
+			if (ret < 0) {
+				DBG("adb_write: xfer error %d\n", ret);
+				ctxt->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+
+
+	if (req)
+		req_put(ctxt, &ctxt->tx_idle, req);
+
+	_unlock(&ctxt->write_excl);
+	return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	if (_lock(&ctxt->open_excl))
+		return -EBUSY;
+
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+static int adb_release(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	_unlock(&ctxt->open_excl);
+	return 0;
+}
+
+static struct file_operations adb_fops = {
+	.owner =   THIS_MODULE,
+	.read =    adb_read,
+	.write =   adb_write,
+	.open =    adb_open,
+	.release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_adb",
+	.fops = &adb_fops,
+};
+
+static int adb_enable_open(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	if (_lock(&ctxt->enable_excl))
+		return -EBUSY;
+
+	printk(KERN_INFO "enabling adb function\n");
+	usb_function_enable(ADB_FUNCTION_NAME, 1);
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+static int adb_enable_release(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	printk(KERN_INFO "disabling adb function\n");
+	usb_function_enable(ADB_FUNCTION_NAME, 0);
+	_unlock(&ctxt->enable_excl);
+	return 0;
+}
+
+static struct file_operations adb_enable_fops = {
+	.owner =   THIS_MODULE,
+	.open =    adb_enable_open,
+	.release = adb_enable_release,
+};
+
+static struct miscdevice adb_enable_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_adb_enable",
+	.fops = &adb_enable_fops,
+};
+
+static void adb_unbind(void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	if (!ctxt->bound)
+		return;
+
+	while ((req = req_get(ctxt, &ctxt->rx_idle))) {
+		usb_ept_free_req(ctxt->out, req);
+	}
+	while ((req = req_get(ctxt, &ctxt->tx_idle))) {
+		usb_ept_free_req(ctxt->in, req);
+	}
+	if (ctxt->in) {
+		usb_ept_fifo_flush(ctxt->in);
+		usb_ept_enable(ctxt->in,  0);
+		usb_free_endpoint(ctxt->in);
+	}
+	if (ctxt->out) {
+		usb_ept_fifo_flush(ctxt->out);
+		usb_ept_enable(ctxt->out,  0);
+		usb_free_endpoint(ctxt->out);
+	}
+
+	ctxt->online = 0;
+	ctxt->error = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+	ctxt->bound = 0;
+}
+
+static void adb_bind(void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+	int n;
+
+	intf_desc.bInterfaceNumber =
+		usb_msm_get_next_ifc_number(&usb_func_adb);
+
+	ctxt->in = usb_alloc_endpoint(USB_DIR_IN);
+	if (ctxt->in) {
+		hs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ctxt->in->num;
+		fs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ctxt->in->num;
+	}
+
+	ctxt->out = usb_alloc_endpoint(USB_DIR_OUT);
+	if (ctxt->out) {
+		hs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT|ctxt->out->num;
+		fs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT|ctxt->out->num;
+	}
+
+	for (n = 0; n < RX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 4096);
+		if (req == 0) {
+			ctxt->bound = 1;
+			goto fail;
+		}
+		req->context = ctxt;
+		req->complete = adb_complete_out;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	for (n = 0; n < TX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 4096);
+		if (req == 0) {
+			ctxt->bound = 1;
+			goto fail;
+		}
+		req->context = ctxt;
+		req->complete = adb_complete_in;
+		req_put(ctxt, &ctxt->tx_idle, req);
+	}
+	ctxt->bound = 1;
+	return;
+
+fail:
+	printk(KERN_ERR "adb_bind() could not allocate requests\n");
+	adb_unbind(ctxt);
+}
+
+static void adb_configure(int configured, void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	if (configured) {
+		ctxt->online = 1;
+
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(ctxt->in, &hs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->out, &hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(ctxt->in, &fs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->out, &fs_bulk_out_desc);
+		}
+		usb_ept_enable(ctxt->in,  1);
+		usb_ept_enable(ctxt->out, 1);
+
+		/* if we have a stale request being read, recycle it */
+		ctxt->read_buf = 0;
+		ctxt->read_count = 0;
+		if (ctxt->read_req) {
+			req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+			ctxt->read_req = 0;
+		}
+
+		/* retire any completed rx requests from previous session */
+		while ((req = req_get(ctxt, &ctxt->rx_done)))
+			req_put(ctxt, &ctxt->rx_idle, req);
+
+	} else {
+		ctxt->online = 0;
+		ctxt->error = 1;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+}
+
+static struct usb_function usb_func_adb = {
+	.bind = adb_bind,
+	.unbind = adb_unbind,
+	.configure = adb_configure,
+
+	.name = ADB_FUNCTION_NAME,
+	.context = &_context,
+
+};
+
+struct usb_descriptor_header *adb_hs_descriptors[4];
+struct usb_descriptor_header *adb_fs_descriptors[4];
+static int __init adb_init(void)
+{
+	int ret = 0;
+	struct adb_context *ctxt = &_context;
+	DBG("adb_init()\n");
+
+	init_waitqueue_head(&ctxt->read_wq);
+	init_waitqueue_head(&ctxt->write_wq);
+
+	atomic_set(&ctxt->open_excl, 0);
+	atomic_set(&ctxt->read_excl, 0);
+	atomic_set(&ctxt->write_excl, 0);
+	atomic_set(&ctxt->enable_excl, 0);
+
+	spin_lock_init(&ctxt->lock);
+
+	INIT_LIST_HEAD(&ctxt->rx_idle);
+	INIT_LIST_HEAD(&ctxt->rx_done);
+	INIT_LIST_HEAD(&ctxt->tx_idle);
+
+	adb_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	adb_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&hs_bulk_in_desc;
+	adb_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&hs_bulk_out_desc;
+	adb_hs_descriptors[3] = NULL;
+
+	adb_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	adb_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&fs_bulk_in_desc;
+	adb_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&fs_bulk_out_desc;
+	adb_fs_descriptors[3] = NULL;
+
+	usb_func_adb.hs_descriptors = adb_hs_descriptors;
+	usb_func_adb.fs_descriptors = adb_fs_descriptors;
+
+	ret = misc_register(&adb_device);
+	if (ret) {
+		printk(KERN_ERR "adb Can't register misc device  %d \n",
+						MISC_DYNAMIC_MINOR);
+		return ret;
+	}
+	ret = misc_register(&adb_enable_device);
+	if (ret) {
+		printk(KERN_ERR "adb Can't register misc enable device  %d \n",
+						MISC_DYNAMIC_MINOR);
+		misc_deregister(&adb_device);
+		return ret;
+	}
+
+	ret = usb_function_register(&usb_func_adb);
+	if (ret) {
+		misc_deregister(&adb_device);
+		misc_deregister(&adb_enable_device);
+	}
+	return ret;
+}
+
+module_init(adb_init);
+
+static void __exit adb_exit(void)
+{
+	misc_deregister(&adb_device);
+	misc_deregister(&adb_enable_device);
+
+	usb_function_unregister(&usb_func_adb);
+}
+module_exit(adb_exit);
diff --git a/drivers/usb/function/diag.c b/drivers/usb/function/diag.c
new file mode 100644
index 0000000..94c32e7
--- /dev/null
+++ b/drivers/usb/function/diag.c
@@ -0,0 +1,567 @@
+/* drivers/usb/function/diag.c
+ *
+ * Diag Function Device - Route DIAG frames between SMD and USB
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+
+#include <mach/msm_smd.h>
+#include <mach/usbdiag.h>
+
+#include "usb_function.h"
+
+#define WRITE_COMPLETE 0
+#define READ_COMPLETE  0
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0xFF,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength 			=	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType 	=	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes 		=	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize 	=	__constant_cpu_to_le16(512),
+	.bInterval 			=	0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+/* list of requests */
+struct diag_req_entry {
+	struct list_head re_entry;
+	struct usb_request *usb_req;
+	void *diag_request;
+};
+struct diag_context {
+	struct usb_endpoint *epout;
+	struct usb_endpoint *epin;
+	spinlock_t dev_lock;
+	/* linked list of read requests */
+	struct list_head dev_read_req_list;
+	/* linked list of write requests */
+	struct list_head dev_write_req_list;
+	struct diag_operations *operations;
+	struct workqueue_struct *diag_wq;
+	struct work_struct usb_config_work;
+	unsigned configured;
+	unsigned bound;
+	int diag_opened;
+};
+
+static struct usb_function usb_func_diag;
+static struct diag_context _context;
+static void diag_write_complete(struct usb_endpoint *,
+		struct usb_request *);
+static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *,
+		unsigned len, gfp_t);
+static void diag_free_req_entry(struct usb_endpoint *, struct diag_req_entry *);
+static void diag_read_complete(struct usb_endpoint *, struct usb_request *);
+
+
+static void diag_unbind(void *context)
+{
+
+	struct diag_context *ctxt = context;
+
+	if (!ctxt)
+		return;
+	if (!ctxt->bound)
+		return;
+	if (ctxt->epin) {
+		usb_ept_fifo_flush(ctxt->epin);
+		usb_ept_enable(ctxt->epin, 0);
+		usb_free_endpoint(ctxt->epin);
+		}
+	if (ctxt->epout) {
+		usb_ept_fifo_flush(ctxt->epout);
+		usb_ept_enable(ctxt->epout, 0);
+		usb_free_endpoint(ctxt->epout);
+		}
+	ctxt->bound = 0;
+}
+static void diag_bind(void *context)
+{
+	struct diag_context *ctxt = context;
+
+	if (!ctxt)
+		return;
+
+	intf_desc.bInterfaceNumber =
+		usb_msm_get_next_ifc_number(&usb_func_diag);
+
+	ctxt->epin = usb_alloc_endpoint(USB_DIR_IN);
+	if (ctxt->epin) {
+		hs_bulk_in_desc.bEndpointAddress =
+			USB_DIR_IN | ctxt->epin->num;
+		fs_bulk_in_desc.bEndpointAddress =
+			USB_DIR_IN | ctxt->epin->num;
+	}
+
+	ctxt->epout = usb_alloc_endpoint(USB_DIR_OUT);
+	if (ctxt->epout) {
+		hs_bulk_out_desc.bEndpointAddress =
+			USB_DIR_OUT | ctxt->epout->num;
+		fs_bulk_out_desc.bEndpointAddress =
+			USB_DIR_OUT | ctxt->epout->num;
+	}
+
+	ctxt->bound = 1;
+}
+static void diag_configure(int configured, void *_ctxt)
+
+{
+	struct diag_context *ctxt = _ctxt;
+
+	if (!ctxt)
+		return;
+	if (configured) {
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(ctxt->epin, &hs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->epout, &hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(ctxt->epin, &fs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->epout, &fs_bulk_out_desc);
+		}
+		usb_ept_enable(ctxt->epin,  1);
+		usb_ept_enable(ctxt->epout, 1);
+		ctxt->configured = 1;
+		queue_work(_context.diag_wq, &(_context.usb_config_work));
+	} else {
+		/* all pending requests will be canceled */
+		ctxt->configured = 0;
+		if (ctxt->epin) {
+			usb_ept_fifo_flush(ctxt->epin);
+			usb_ept_enable(ctxt->epin, 0);
+		}
+		if (ctxt->epout) {
+			usb_ept_fifo_flush(ctxt->epout);
+			usb_ept_enable(ctxt->epout, 0);
+		}
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_disconnect))
+				ctxt->operations->diag_disconnect();
+	}
+
+}
+static struct usb_function usb_func_diag = {
+	.bind = diag_bind,
+	.configure = diag_configure,
+	.unbind = diag_unbind,
+
+
+	.name = "diag",
+	.context = &_context,
+};
+int diag_usb_register(struct diag_operations *func)
+{
+	struct diag_context *ctxt = &_context;
+
+	if (func == NULL) {
+		printk(KERN_ERR "diag_usb_register: registering "
+				"diag char operations NULL\n");
+		return -1;
+	}
+	ctxt->operations = func;
+	if (ctxt->configured == 1)
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_connect))
+				ctxt->operations->diag_connect();
+	return 0;
+}
+EXPORT_SYMBOL(diag_usb_register);
+
+int diag_usb_unregister(void)
+{
+	struct diag_context *ctxt = &_context;
+
+	ctxt->operations = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(diag_usb_unregister);
+
+int diag_open(int num_req)
+{
+	struct diag_context *ctxt = &_context;
+	struct diag_req_entry *write_entry;
+	struct diag_req_entry *read_entry;
+	int i = 0;
+
+	for (i = 0; i < num_req; i++) {
+		write_entry = diag_alloc_req_entry(ctxt->epin, 0, GFP_KERNEL);
+		if (write_entry) {
+			write_entry->usb_req->complete = diag_write_complete;
+			write_entry->usb_req->device = (void *)ctxt;
+			list_add(&write_entry->re_entry,
+					&ctxt->dev_write_req_list);
+		} else
+			goto write_error;
+	}
+
+	for (i = 0; i < num_req ; i++) {
+		read_entry = diag_alloc_req_entry(ctxt->epout, 0 , GFP_KERNEL);
+		if (read_entry) {
+			read_entry->usb_req->complete = diag_read_complete;
+			read_entry->usb_req->device = (void *)ctxt;
+			list_add(&read_entry->re_entry ,
+					&ctxt->dev_read_req_list);
+		} else
+			goto read_error;
+		}
+	ctxt->diag_opened = 1;
+	return 0;
+read_error:
+	printk(KERN_ERR "%s:read requests allocation failure\n", __func__);
+	while (!list_empty(&ctxt->dev_read_req_list)) {
+		read_entry = list_entry(ctxt->dev_read_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&read_entry->re_entry);
+		diag_free_req_entry(ctxt->epout, read_entry);
+	}
+write_error:
+	printk(KERN_ERR "%s: write requests allocation failure\n", __func__);
+	while (!list_empty(&ctxt->dev_write_req_list)) {
+		write_entry = list_entry(ctxt->dev_write_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&write_entry->re_entry);
+		diag_free_req_entry(ctxt->epin, write_entry);
+	}
+	ctxt->diag_opened = 0;
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(diag_open);
+
+void diag_close(void)
+{
+	struct diag_context *ctxt = &_context;
+	struct diag_req_entry *req_entry;
+	/* free write requests */
+
+	while (!list_empty(&ctxt->dev_write_req_list)) {
+		req_entry = list_entry(ctxt->dev_write_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		diag_free_req_entry(ctxt->epin, req_entry);
+	}
+
+	/* free read requests */
+	while (!list_empty(&ctxt->dev_read_req_list)) {
+		req_entry = list_entry(ctxt->dev_read_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		diag_free_req_entry(ctxt->epout, req_entry);
+	}
+	return;
+}
+EXPORT_SYMBOL(diag_close);
+
+static void diag_free_req_entry(struct usb_endpoint *ep,
+		struct diag_req_entry *req)
+{
+	if (ep != NULL && req != NULL) {
+		if (req->usb_req != NULL)
+			usb_ept_free_req(ep, req->usb_req);
+		kfree(req);
+	}
+}
+
+
+static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *ep,
+		unsigned len, gfp_t kmalloc_flags)
+{
+	struct diag_req_entry *req;
+
+	req = kmalloc(sizeof(struct diag_req_entry), kmalloc_flags);
+	if (req == NULL)
+		return ERR_PTR(-ENOMEM);
+
+
+	req->usb_req = usb_ept_alloc_req(ep , 0);
+	if (req->usb_req == NULL) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	req->usb_req->context = req;
+	return req;
+}
+
+int diag_read(struct diag_request *d_req)
+{
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct diag_req_entry *req_entry = NULL;
+	struct diag_context *ctxt = &_context;
+
+
+	if (ctxt->diag_opened != 1)
+		return -EIO;
+	spin_lock_irqsave(&ctxt->dev_lock , flags);
+	if (!list_empty(&ctxt->dev_read_req_list)) {
+		req_entry = list_entry(ctxt->dev_read_req_list.next ,
+				struct diag_req_entry , re_entry);
+		req_entry->diag_request = d_req;
+		req = req_entry->usb_req;
+		list_del(&req_entry->re_entry);
+	}
+	spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+	if (req) {
+		req->buf = d_req->buf;
+		req->length = d_req->length;
+		req->device = ctxt;
+		if (usb_ept_queue_xfer(ctxt->epout, req)) {
+			/* If error add the link to the linked list again. */
+			spin_lock_irqsave(&ctxt->dev_lock , flags);
+			list_add_tail(&req_entry->re_entry ,
+					&ctxt->dev_read_req_list);
+			spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+			printk(KERN_ERR "diag_read:can't queue the request\n");
+			return -EIO;
+		}
+	} else {
+		printk(KERN_ERR
+				"diag_read:no requests available\n");
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(diag_read);
+
+int diag_write(struct diag_request *d_req)
+{
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct diag_req_entry *req_entry = NULL;
+	struct diag_context *ctxt = &_context;
+
+	if (ctxt->diag_opened != 1)
+		return -EIO;
+	spin_lock_irqsave(&ctxt->dev_lock , flags);
+	if (!list_empty(&ctxt->dev_write_req_list)) {
+		req_entry = list_entry(ctxt->dev_write_req_list.next ,
+				struct diag_req_entry , re_entry);
+		req_entry->diag_request = d_req;
+		req = req_entry->usb_req;
+		list_del(&req_entry->re_entry);
+	}
+	spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+	if (req) {
+		req->buf = d_req->buf;
+		req->length = d_req->length;
+		req->device = ctxt;
+		if (usb_ept_queue_xfer(ctxt->epin, req)) {
+			/* If error add the link to linked list again*/
+			spin_lock_irqsave(&ctxt->dev_lock, flags);
+			list_add_tail(&req_entry->re_entry ,
+					&ctxt->dev_write_req_list);
+			spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+			printk(KERN_ERR "diag_write: cannot queue"
+					" write request\n");
+			return -EIO;
+		}
+	} else {
+		printk(KERN_ERR	"diag_write: no requests available\n");
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(diag_write);
+
+/* Completion handler for IN (write) transfers.
+ *
+ * When a transfer completes as an exact multiple of the endpoint max
+ * packet size, a zero length packet is queued so the host sees the end
+ * of the transfer; the same request is recycled for that ZLP and
+ * completes here a second time with length == 0.  In every other case
+ * the request goes back on the free list and the registered client's
+ * write-complete callback is invoked.
+ */
+static void diag_write_complete(struct usb_endpoint *ep ,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = (struct diag_context *)req->device;
+	struct diag_req_entry *diag_req = req->context;
+	struct diag_request *d_req = (struct diag_request *)
+						diag_req->diag_request;
+	unsigned long flags;
+
+	if (ctxt == NULL) {
+		/* fixed missing space between concatenated string halves */
+		printk(KERN_ERR "diag_write_complete: requesting "
+				"NULL device pointer\n");
+		return;
+	}
+	if (req->status == WRITE_COMPLETE) {
+		if ((req->length >= ep->max_pkt) &&
+				((req->length % ep->max_pkt) == 0)) {
+			req->length = 0;
+			req->device = ctxt;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			usb_ept_queue_xfer(ctxt->epin, req);
+			return;
+		}
+		/* normal completion (or the ZLP just finished) */
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_write_req_list);
+		/* for the ZLP (length == 0) actual/status were saved when
+		 * the ZLP was queued above, so don't overwrite them here
+		 */
+		if (req->length != 0) {
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+		}
+		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_write_complete))
+				ctxt->operations->diag_char_write_complete(
+					d_req);
+	} else {
+		/* error completion: recycle the request and report status */
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+			&ctxt->dev_write_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_write_complete))
+				ctxt->operations->diag_char_write_complete(
+					d_req);
+	}
+}
+/* Completion handler for OUT (read) transfers.
+ *
+ * Success and error completions are handled identically: return the
+ * request to the free list, record actual/status for the client and
+ * invoke its read-complete callback.  (The original duplicated this
+ * exact path in both arms of an if/else on req->status; collapsed.)
+ */
+static void diag_read_complete(struct usb_endpoint *ep ,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = (struct diag_context *)req->device;
+	struct diag_req_entry *diag_req = req->context;
+	struct diag_request *d_req = (struct diag_request *)
+						diag_req->diag_request;
+	unsigned long flags;
+
+	if (ctxt == NULL) {
+		/* fixed missing space between concatenated string halves */
+		printk(KERN_ERR "diag_read_complete: requesting "
+				"NULL device pointer\n");
+		return;
+	}
+	spin_lock_irqsave(&ctxt->dev_lock, flags);
+	list_add_tail(&diag_req->re_entry, &ctxt->dev_read_req_list);
+	d_req->actual = req->actual;
+	d_req->status = req->status;
+	spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+	if ((ctxt->operations) &&
+		(ctxt->operations->diag_char_read_complete))
+			ctxt->operations->diag_char_read_complete(
+				d_req);
+}
+/* Work item: report cable configuration to the registered client, if
+ * one has supplied a connect callback.
+ */
+void usb_config_work_func(struct work_struct *work)
+{
+	struct diag_context *ctxt = &_context;
+
+	if (!ctxt->operations || !ctxt->operations->diag_connect)
+		return;
+	ctxt->operations->diag_connect();
+}
+
+/* NULL-terminated descriptor lists (interface + bulk IN/OUT) for
+ * high-speed and full-speed operation; populated by diag_init().
+ */
+struct usb_descriptor_header *diag_hs_descriptors[4];
+struct usb_descriptor_header *diag_fs_descriptors[4];
+
+/* Module init: build the HS/FS descriptor lists, create the workqueue
+ * used for connect notifications, and register the function driver.
+ *
+ * Returns 0 on success or a negative errno (the original returned a
+ * bare -1 on workqueue-creation failure).
+ */
+static int __init diag_init(void)
+{
+	int r;
+	struct diag_context *ctxt = &_context;
+
+	diag_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	diag_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&hs_bulk_in_desc;
+	diag_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&hs_bulk_out_desc;
+	diag_hs_descriptors[3] = NULL;
+
+	diag_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	diag_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&fs_bulk_in_desc;
+	diag_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&fs_bulk_out_desc;
+	diag_fs_descriptors[3] = NULL;
+
+	INIT_LIST_HEAD(&ctxt->dev_read_req_list);
+	INIT_LIST_HEAD(&ctxt->dev_write_req_list);
+	ctxt->diag_wq = create_singlethread_workqueue("diag");
+	if (ctxt->diag_wq == NULL)
+		return -ENOMEM;
+	INIT_WORK(&ctxt->usb_config_work, usb_config_work_func);
+
+	usb_func_diag.hs_descriptors = diag_hs_descriptors;
+	usb_func_diag.fs_descriptors = diag_fs_descriptors;
+	spin_lock_init(&ctxt->dev_lock);
+	r = usb_function_register(&usb_func_diag);
+	if (r < 0)
+		destroy_workqueue(ctxt->diag_wq);
+	return r;
+}
+
+module_init(diag_init);
+/* Module exit: unregister the function driver and destroy the
+ * workqueue.  (The original contained two contradictory NULL checks on
+ * the address of a static object, which can never be NULL; removed.)
+ */
+static void __exit diag_exit(void)
+{
+	struct diag_context *ctxt = &_context;
+
+	usb_function_unregister(&usb_func_diag);
+	destroy_workqueue(ctxt->diag_wq);
+}
+module_exit(diag_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/ether.c b/drivers/usb/function/ether.c
new file mode 100644
index 0000000..f31032e9
--- /dev/null
+++ b/drivers/usb/function/ether.c
@@ -0,0 +1,327 @@
+/* drivers/usb/function/ether.c
+ *
+ * Simple Ethernet Function Device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Implements the "cdc_subset" bulk-only protocol supported by Linux.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "usb_function.h"
+
+/* Ethernet frame is 1514 + FCS, but round up to 512 * 3 so we
+ * always queue a multiple of the USB max packet size (64 or 512)
+ */
+#define USB_MTU 1536
+
+#define MAX_TX 8
+#define MAX_RX 8
+
+/* Per-device state for the simple (cdc_subset style) ethernet function. */
+struct ether_context {
+	spinlock_t lock;		/* protects rx_reqs and tx_reqs */
+	struct net_device *dev;
+	struct usb_endpoint *out;	/* bulk OUT: host-to-device frames */
+	struct usb_endpoint *in;	/* bulk IN: device-to-host frames */
+
+	struct list_head rx_reqs;	/* free usb_requests for receive */
+	struct list_head tx_reqs;	/* free usb_requests for transmit */
+
+	struct net_device_stats stats;
+};
+
+static int ether_queue_out(struct ether_context *ctxt,
+			   struct usb_request *req);
+static void ether_in_complete(struct usb_endpoint *ept,
+			      struct usb_request *req);
+static void ether_out_complete(struct usb_endpoint *ept,
+			       struct usb_request *req);
+
+/* Function-driver bind callback: capture our two endpoints and
+ * preallocate the RX and TX request pools.  The endpoint slot order
+ * matches usb_func_ether.ifc_ept_type: slot 0 is bulk OUT, slot 1 is
+ * bulk IN.  Allocation failures simply leave the pools shorter.
+ */
+static void ether_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct ether_context *ctxt = _ctxt;
+	struct usb_request *req;
+	unsigned long flags;
+	int n;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	for (n = 0; n < MAX_RX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 0);
+		if (!req)
+			break;
+		req->complete = ether_out_complete;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+	for (n = 0; n < MAX_TX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 0);
+		if (!req)
+			break;
+		req->complete = ether_in_complete;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->tx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+}
+
+/* TX completion: account the result, free the skb and recycle the
+ * request onto the free list.
+ */
+static void ether_in_complete(struct usb_endpoint *ept,
+			      struct usb_request *req)
+{
+	unsigned long flags;
+	struct sk_buff *skb = req->context;
+	struct ether_context *ctxt = *((void **) skb->cb);
+
+	if (req->status == 0) {
+		ctxt->stats.tx_packets++;
+		ctxt->stats.tx_bytes += req->actual;
+	} else {
+		ctxt->stats.tx_errors++;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	/* an empty list here means usb_ether_xmit() took the last free
+	 * request and stopped the queue; this completion frees one up,
+	 * so wake the queue before re-adding the request
+	 */
+	if (list_empty(&ctxt->tx_reqs))
+		netif_start_queue(ctxt->dev);
+	list_add_tail(&req->list, &ctxt->tx_reqs);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* RX completion: hand a good frame to the network stack, then re-arm
+ * the request with a fresh skb via ether_queue_out(); if the device
+ * just went away (-ENODEV) the request is parked on the free list
+ * instead.
+ */
+static void ether_out_complete(struct usb_endpoint *ept,
+			       struct usb_request *req)
+{
+	struct sk_buff *skb = req->context;
+	struct ether_context *ctxt = *((void **) skb->cb);
+
+	if (req->status == 0) {
+		skb_put(skb, req->actual);
+		skb->protocol = eth_type_trans(skb, ctxt->dev);
+		ctxt->stats.rx_packets++;
+		ctxt->stats.rx_bytes += req->actual;
+		netif_rx(skb);
+	} else {
+		/* error: the skb was never handed upward, free it here */
+		dev_kfree_skb_any(skb);
+		ctxt->stats.rx_errors++;
+	}
+
+	/* don't bother requeuing if we just went offline */
+	if (req->status == -ENODEV) {
+		unsigned long flags;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	} else {
+		if (ether_queue_out(ctxt, req))
+			pr_err("ether_out: cannot requeue\n");
+	}
+}
+
+/* Allocate a fresh receive skb, attach it to @req and queue the
+ * request on the OUT endpoint.  On any failure the request is parked
+ * back on rx_reqs so it is not lost.  (Restructured from the original,
+ * which used a goto that jumped into the middle of an if-body.)
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int ether_queue_out(struct ether_context *ctxt,
+			   struct usb_request *req)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = alloc_skb(USB_MTU + NET_IP_ALIGN, GFP_ATOMIC);
+	if (!skb) {
+		pr_err("ether_queue_out: failed to alloc skb\n");
+		ret = -ENOMEM;
+	} else {
+		/* align the IP header for the benefit of the stack */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		*((void **) skb->cb) = ctxt;
+		req->buf = skb->data;
+		req->length = USB_MTU;
+		req->context = skb;
+
+		ret = usb_ept_queue_xfer(ctxt->out, req);
+	}
+
+	if (ret) {
+		/* keep the request on the free list for a later retry */
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+
+	return ret;
+}
+
+/* Configuration callback: when we go online, push every free RX
+ * request to the OUT endpoint; when we go offline the controller
+ * cancels anything still pending, so there is nothing to do.
+ */
+static void ether_configure(int configured, void *_ctxt)
+{
+	unsigned long flags;
+	struct ether_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	pr_info("ether_configure() %d\n", configured);
+
+	if (!configured) {
+		/* all pending requests will be canceled */
+		return;
+	}
+
+	/* we're online -- get all rx requests queued */
+	for (;;) {
+		req = NULL;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		if (!list_empty(&ctxt->rx_reqs)) {
+			req = list_first_entry(&ctxt->rx_reqs,
+					       struct usb_request, list);
+			list_del(&req->list);
+		}
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		if (!req)
+			break;
+		if (ether_queue_out(ctxt, req))
+			break;
+	}
+}
+
+/* Function registration record: CDC "ethernet subset" style interface
+ * (class 0x02, subclass 0x0a) with one bulk OUT and one bulk IN
+ * endpoint -- order must match ether_bind()'s ept[0]/ept[1] usage.
+ */
+static struct usb_function usb_func_ether = {
+	.bind = ether_bind,
+	.configure = ether_configure,
+
+	.name = "ether",
+
+	.ifc_class = 0x02,
+	.ifc_subclass = 0x0a,
+	.ifc_protocol = 0x00,
+
+	.ifc_name = "ether",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+/* Hard-start-xmit: take a free TX request, attach the skb and queue it
+ * on the IN endpoint.  The netif queue is stopped when the last free
+ * request is taken and restarted from ether_in_complete().
+ *
+ * Returns 0 when the packet was consumed (or dropped on queue error),
+ * 1 when no request was available and the stack should retry.
+ */
+static int usb_ether_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+	struct usb_request *req;
+	unsigned long flags;
+	unsigned len;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(&ctxt->tx_reqs)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(&ctxt->tx_reqs,
+				       struct usb_request, list);
+		list_del(&req->list);
+		if (list_empty(&ctxt->tx_reqs))
+			netif_stop_queue(dev);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (!req) {
+		pr_err("usb_ether_xmit: could not obtain tx request\n");
+		return 1;
+	}
+
+	/* ensure that we end with a short packet: pad by one byte when
+	 * the length is an exact multiple of 64 (this also covers 512,
+	 * which is a multiple of 64, so the original's second test was
+	 * redundant).  NOTE(review): the extra byte transmitted lies
+	 * one past skb->len -- confirm the skb always has that byte of
+	 * tailroom available.
+	 */
+	len = skb->len;
+	if (!(len & 63))
+		len++;
+
+	*((void **) skb->cb) = ctxt;
+	req->context = skb;
+	req->buf = skb->data;
+	req->length = len;
+
+	if (usb_ept_queue_xfer(ctxt->in, req)) {
+		/* queueing failed: recycle the request, wake the queue
+		 * and drop the packet
+		 */
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->tx_reqs);
+		netif_start_queue(dev);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+
+		dev_kfree_skb_any(skb);
+		ctxt->stats.tx_dropped++;
+
+		pr_err("usb_ether_xmit: could not queue tx request\n");
+	}
+
+	return 0;
+}
+
+/* net_device open hook: nothing to do; RX arming is driven by the USB
+ * configuration callback, not by the interface being brought up.
+ */
+static int usb_ether_open(struct net_device *dev)
+{
+	return 0;
+}
+
+/* net_device stop hook: nothing to tear down here. */
+static int usb_ether_stop(struct net_device *dev)
+{
+	return 0;
+}
+
+/* Return the statistics block kept in our private context. */
+static struct net_device_stats *usb_ether_get_stats(struct net_device *dev)
+{
+	struct ether_context *priv;
+
+	priv = netdev_priv(dev);
+	return &priv->stats;
+}
+
+/* netdev setup callback (invoked from alloc_netdev): initialize the
+ * private context, install the old-style net_device hooks, apply
+ * ethernet defaults via ether_setup() and pick a random MAC address.
+ */
+static void __init usb_ether_setup(struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+
+	pr_info("usb_ether_setup()\n");
+
+	INIT_LIST_HEAD(&ctxt->rx_reqs);
+	INIT_LIST_HEAD(&ctxt->tx_reqs);
+	spin_lock_init(&ctxt->lock);
+	ctxt->dev = dev;
+
+	dev->open = usb_ether_open;
+	dev->stop = usb_ether_stop;
+	dev->hard_start_xmit = usb_ether_xmit;
+	dev->get_stats = usb_ether_get_stats;
+	dev->watchdog_timeo = 20;	/* in jiffies */
+
+	ether_setup(dev);
+
+	random_ether_addr(dev->dev_addr);
+}
+
+/* Module init: allocate and register the network device, then register
+ * the USB function.  A failure of the second step undoes the first
+ * (the original ignored the usb_function_register() result, leaving a
+ * registered netdev behind on failure).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int __init ether_init(void)
+{
+	struct net_device *dev;
+	int ret;
+
+	dev = alloc_netdev(sizeof(struct ether_context),
+			   "usb%d", usb_ether_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = register_netdev(dev);
+	if (ret) {
+		free_netdev(dev);
+		return ret;
+	}
+
+	usb_func_ether.context = netdev_priv(dev);
+	ret = usb_function_register(&usb_func_ether);
+	if (ret) {
+		unregister_netdev(dev);
+		free_netdev(dev);
+	}
+	return ret;
+}
+
+module_init(ether_init);
diff --git a/drivers/usb/function/ether_cdc_ecm.c b/drivers/usb/function/ether_cdc_ecm.c
new file mode 100644
index 0000000..8fa5af1
--- /dev/null
+++ b/drivers/usb/function/ether_cdc_ecm.c
@@ -0,0 +1,1337 @@
+/*
+ * ether_cdc_ecm.c -- Ethernet Function driver, with CDC
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This file has been derived from gadget/ether.c
+ *
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/cdc.h>
+
+#include "usb_function.h"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Ethernet function driver -- with CDC options
+ * Builds on hardware support for a full duplex link.
+ *
+ * CDC Ethernet is the standard USB solution for sending Ethernet frames
+ * using USB.  Real hardware tends to use the same framing protocol but look
+ * different for control features.  This driver strongly prefers to use
+ * this USB-IF standard as its open-systems interoperability solution;
+ * most host side USB stacks (except from Microsoft) support it.
+ */
+
+#define DRIVER_DESC		"Ethernet Function CDC ECM"
+#define DRIVER_VERSION		"1.0"
+
+static const char shortname[] = "ether";
+static const char driver_desc[] = DRIVER_DESC;
+
+static unsigned int string_data;
+static unsigned int string_control;
+static unsigned int string_ethaddr;
+#define RX_EXTRA	20		/* guard against rx overflows */
+
+
+
+/* outgoing packet filters. */
+#define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
+			| USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+			| USB_CDC_PACKET_TYPE_PROMISCUOUS \
+			| USB_CDC_PACKET_TYPE_DIRECTED)
+
+/*-------------------------------------------------------------------------*/
+
+/* Per-device state for the CDC ECM ethernet function. */
+struct eth_dev {
+	spinlock_t		lock;
+	struct usb_request	*req;		/* for control responses */
+	struct usb_request	*stat_req;	/* for cdc status */
+
+	unsigned		configured:1;	/* set by eth_set_config() */
+	struct usb_endpoint	*in_ep, *out_ep, *status_ep;
+
+	spinlock_t		req_lock;	/* guards tx_reqs/rx_reqs */
+	struct list_head	tx_reqs, rx_reqs;	/* free request pools */
+
+	struct net_device	*net;
+	struct net_device_stats	stats;
+	atomic_t		tx_qlen;
+
+	struct work_struct	work;
+	unsigned		zlp:1;
+	unsigned		suspended:1;
+	u16			cdc_filter;	/* USB_CDC_PACKET_TYPE_* bits */
+	unsigned long		todo;		/* deferred-work flag bits */
+#define	WORK_RX_MEMORY		0
+	u8			host_mac[ETH_ALEN];
+
+	int alt_set;	/* current altsetting of the data interface */
+};
+
+static struct usb_function usb_func_ether;
+
+/* Ethernet function descriptors */
+#define USB_DT_IAD_SIZE		8
+struct usb_interface_assoc_descriptor	eth_IAD = {
+	.bLength           = USB_DT_IAD_SIZE,
+	.bDescriptorType   = USB_DT_INTERFACE_ASSOCIATION,
+	.bInterfaceCount   = 2,
+	.bFunctionClass    = USB_CLASS_COMM,
+	.bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol = USB_CDC_PROTO_NONE,
+	.iFunction         = 0,
+};
+
+struct usb_interface_descriptor		eth_control_intf = {
+	.bLength =  USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+};
+
+struct usb_cdc_header_desc		eth_header_desc = {
+	.bLength =		sizeof(struct usb_cdc_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		__constant_cpu_to_le16(0x0110),
+};
+
+struct usb_cdc_union_desc		eth_union_desc = {
+	.bLength =		sizeof(struct usb_cdc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+};
+
+struct usb_cdc_ether_desc 		eth_ether_desc = {
+	.bLength =		sizeof(struct usb_cdc_ether_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+	/* this descriptor actually adds value, surprise! */
+	.bmEthernetStatistics =	__constant_cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	__constant_cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	__constant_cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+/* Interrupt IN endpoints carrying CDC notifications.  wMaxPacketSize
+ * is a little-endian multi-byte descriptor field, so it must go
+ * through cpu_to_le16() like every other wMaxPacketSize in this file
+ * (the original stored a host-endian 64, wrong on big-endian CPUs).
+ */
+struct usb_endpoint_descriptor 		eth_control_intf_hs_int_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.bInterval =            4,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct usb_endpoint_descriptor 		eth_control_intf_fs_int_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.bInterval =            4,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct usb_interface_descriptor 	eth_data_alt_zero_intf = {
+	.bLength =  USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bAlternateSetting =    0,
+	.bNumEndpoints =        0,
+	.bInterfaceClass =      USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =   0,
+	.bInterfaceProtocol =   0,
+};
+
+struct usb_interface_descriptor 	eth_data_alt_one_intf = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bAlternateSetting =    1,
+	.bNumEndpoints =        2,
+	.bInterfaceClass =      USB_CLASS_CDC_DATA ,
+	.bInterfaceSubClass =   0,
+	.bInterfaceProtocol =   USB_CDC_PROTO_NONE,
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_hs_bulk_out_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(512),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_fs_bulk_out_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_hs_bulk_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(512),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_fs_bulk_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct eth_dev *eth_device;
+
+/* Some systems will want different product identifers published in the
+ * device descriptor, either numbers or strings or both.  These string
+ * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
+static char ethaddr[2 * ETH_ALEN + 1];
+static int eth_bound;
+
+#define DEFAULT_QLEN	2	/* double buffering by default */
+
+/* peak bulk transfer bits-per-second */
+#define	HS_BPS		(13 * 512 * 8 * 1000 * 8)
+
+/* for dual-speed hardware, use deeper queues at highspeed */
+#define qlen (DEFAULT_QLEN * 5) /* High Speed */
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt, (d)->net->name, ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DEBUG(dev, fmt, args...) \
+	xprintk(dev, KERN_DEBUG, fmt, ## args)
+#else
+#define DEBUG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDEBUG	DEBUG
+#else
+#define VDEBUG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev, KERN_ERR, fmt, ## args)
+#ifdef WARN
+#undef WARN
+#endif
+#define WARN(dev, fmt, args...) \
+	xprintk(dev, KERN_WARNING, fmt, ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev, KERN_INFO, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* include the status endpoint if we can, even where it's optional.
+ * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real ether
+ * can provide.  more advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+#define STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags);
+static int alloc_requests(struct eth_dev *dev, unsigned n, gfp_t gfp_flags);
+
+/* Enable the status endpoint (when present) and preallocate the RX/TX
+ * request pools.  The caller performs cleanup if this fails.
+ */
+static int set_ether_config(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	int result;
+
+	if (dev->status_ep)
+		usb_ept_enable(dev->status_ep, 1);
+
+	result = alloc_requests(dev, qlen, gfp_flags);
+	if (!result)
+		DEBUG(dev, "qlen %d\n", qlen);
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+/* Tear down the active configuration: stop the net queue, disable the
+ * data endpoints (forcing synchronous completion of pending i/o) and
+ * free every request parked on the tx/rx free lists.  A no-op unless
+ * currently configured.
+ */
+static void eth_reset_config(struct eth_dev *dev)
+{
+	struct usb_request	*req;
+	unsigned long  flags;
+
+	DEBUG(dev, "%s\n", __func__);
+
+	if (!dev)
+		return;
+	if (!dev->net)
+		return;
+
+	if (dev->configured == 0)
+		return;
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	/* disable endpoints, forcing (synchronous) completion of
+	 * pending i/o.  then free the requests.
+	 */
+	if (dev->in_ep) {
+		usb_ept_enable(dev->in_ep, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		while (likely(!list_empty(&dev->tx_reqs))) {
+			req = container_of(dev->tx_reqs.next,
+						struct usb_request, list);
+			list_del(&req->list);
+			/* drop the lock around the free; the list is
+			 * re-examined from the top on reacquire
+			 */
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			usb_ept_free_req(dev->in_ep, req);
+			spin_lock_irqsave(&dev->req_lock, flags);
+		}
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	if (dev->out_ep) {
+		usb_ept_enable(dev->out_ep, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		while (likely(!list_empty(&dev->rx_reqs))) {
+			req = container_of(dev->rx_reqs.next,
+						struct usb_request, list);
+			list_del(&req->list);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			usb_ept_free_req(dev->out_ep, req);
+			spin_lock_irqsave(&dev->req_lock, flags);
+		}
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+
+	if (dev->status_ep)
+		/* NOTE(review): passes a NULL request -- this looks like
+		 * it should free dev->stat_req; confirm the semantics of
+		 * usb_ept_free_req() with a NULL second argument.
+		 */
+		usb_ept_free_req(dev->status_ep, 0);
+	dev->cdc_filter = 0;
+	dev->configured = 0;
+}
+
+/* change our operational config.  must agree with the code
+ * that returns config descriptors, and altsetting code.
+ */
+/* Switch into our (single) ethernet configuration; any partially-made
+ * state is rolled back on failure.
+ */
+static int eth_set_config(struct eth_dev *dev,  gfp_t gfp_flags)
+{
+	int result;
+
+	eth_reset_config(dev);
+	result = set_ether_config(dev, gfp_flags);
+	if (result == 0)
+		dev->configured = 1;
+	else
+		eth_reset_config(dev);
+	return result;
+}
+
+/* Configuration callback from the function framework.  On configure,
+ * program all three endpoints with the descriptor set matching the
+ * enumerated bus speed, then build the ethernet configuration; on
+ * deconfigure, tear everything down.
+ */
+static void eth_configure(int configured, void *_ctxt)
+{
+	int                     result = 0;
+	struct eth_dev *dev = (struct eth_dev *) _ctxt;
+	if (!dev)
+		return ;
+	if (!eth_bound)
+		return;
+
+	if (!configured) {
+		eth_reset_config(dev);
+		return ;
+	}
+	/* already up: nothing to re-do */
+	if (dev->configured == 1)
+		return ;
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->status_ep,
+					&eth_control_intf_hs_int_in_ep_desc);
+		usb_configure_endpoint(dev->in_ep,
+					&eth_data_intf_hs_bulk_in_ep_desc);
+		usb_configure_endpoint(dev->out_ep,
+					&eth_data_intf_hs_bulk_out_ep_desc);
+	} else {
+		usb_configure_endpoint(dev->status_ep,
+					&eth_control_intf_fs_int_in_ep_desc);
+		usb_configure_endpoint(dev->in_ep,
+					&eth_data_intf_fs_bulk_in_ep_desc);
+		usb_configure_endpoint(dev->out_ep,
+					&eth_data_intf_fs_bulk_out_ep_desc);
+	}
+	result = eth_set_config(dev, GFP_ATOMIC);
+}
+/* The interrupt endpoint is used in CDC networking models (Ethernet, ATM)
+ * only to notify the host about link status changes (which we support)
+ * Since we want this CDC Ethernet code to be vendor-neutral, only one
+ * status request is ever queued.
+ */
+
+/* Completion handler for CDC notification transfers on the status
+ * endpoint.  Once the host has read NETWORK_CONNECTION we immediately
+ * queue the follow-up SPEED_CHANGE notification.
+ *
+ * NOTE(review): DEBUG() below references a variable `dev` that does
+ * not exist in this scope; it is harmless while DEBUG compiles to a
+ * no-op, but will break the build if DEBUG logging is enabled.
+ */
+static void
+eth_status_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct usb_cdc_notification	*event = req->buf;
+	int				value = req->status;
+
+	/* issue the second notification if host reads the first */
+	if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION
+			&& value == 0) {
+		__le32	*data = req->buf + sizeof *event;
+
+		event->bmRequestType = 0xA1;
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = __constant_cpu_to_le16(0);
+		event->wIndex =	__constant_cpu_to_le16(
+				eth_data_alt_one_intf.bInterfaceNumber);
+		event->wLength = __constant_cpu_to_le16(8);
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data[0] = data[1] = cpu_to_le32(HS_BPS);
+
+		req->length = STATUS_BYTECOUNT;
+		value = usb_ept_queue_xfer(ep, req);
+		DEBUG(dev, "send SPEED_CHANGE --> %d\n", value);
+		if (value == 0)
+			return;
+	} else if (value != -ECONNRESET)
+		DEBUG(dev, "event %02x --> %d\n",
+			event->bNotificationType, value);
+	req->context = NULL;
+}
+
+/* Queue the initial NETWORK_CONNECTION notification on the status
+ * endpoint; its completion handler (eth_status_complete) then sends
+ * the SPEED_CHANGE notification.
+ */
+static void issue_start_status(struct eth_dev *dev)
+{
+	struct usb_request		*req = dev->stat_req;
+	struct usb_cdc_notification	*event;
+	int				value;
+
+	DEBUG(dev, "%s, flush old status first\n", __func__);
+
+	/* flush old status
+	 *
+	 * FIXME ugly idiom, maybe we'd be better with just
+	 * a "cancel the whole queue" primitive since any
+	 * unlink-one primitive has way too many error modes.
+	 * here, we "know" toggle is already clear...
+	 *
+	 * FIXME iff req->context != null just dequeue it
+	 */
+	usb_ept_enable(dev->status_ep,  0);
+	usb_ept_enable(dev->status_ep, 1);
+
+	/* 3.8.1 says to issue first NETWORK_CONNECTION, then
+	 * a SPEED_CHANGE.  could be useful in some configs.
+	 */
+	event = req->buf;
+	event->bmRequestType = 0xA1;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = __constant_cpu_to_le16(1);	/* connected */
+	event->wIndex = __constant_cpu_to_le16(
+				eth_data_alt_one_intf.bInterfaceNumber);
+	event->wLength = 0;
+
+	req->length = sizeof *event;
+	req->complete = eth_status_complete;
+	req->context = dev;
+
+	value = usb_ept_queue_xfer(dev->status_ep, req);
+	if (value < 0)
+		DEBUG(dev, "status buf queue --> %d\n", value);
+}
+
+/* SET_INTERFACE handler for the data interface.  Altsetting 1 brings
+ * the link up: enable the bulk endpoints, start status notifications
+ * and kick RX if the interface is running.  Altsetting 0 (or any other
+ * selector) takes the link down.  Returns 0 on success, 1 when the
+ * device has not been created yet.
+ */
+static int  eth_set_interface(int  wIndex, int wValue, void *_ctxt)
+{
+	struct eth_dev *dev = eth_device;
+	unsigned long		flags;
+
+	if (dev == NULL)
+		return 1;
+
+	if ((wIndex == eth_data_alt_one_intf.bInterfaceNumber)
+			&& (wValue == 1)) {
+		dev->alt_set = 1;
+		usb_ept_enable(dev->in_ep, 1);
+		usb_ept_enable(dev->out_ep, 1);
+		dev->cdc_filter = DEFAULT_FILTER;
+		netif_carrier_on(dev->net);
+		issue_start_status(dev);
+		if (netif_running(dev->net)) {
+			spin_lock_irqsave(&dev->lock, flags);
+			eth_start(dev, GFP_ATOMIC);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	} else {
+		dev->alt_set = 0;
+		netif_stop_queue(dev->net);
+		netif_carrier_off(dev->net);
+	}
+	return 0;
+}
+
+/* GET_INTERFACE handler: report the current altsetting (0 or 1) of
+ * the data interface.  Guard against a not-yet-created device, which
+ * mirrors the NULL check in eth_set_interface() (the original would
+ * dereference a NULL eth_device here).
+ */
+static int eth_get_interface(int wIndex, void *_ctxt)
+{
+	struct eth_dev *dev = eth_device;
+
+	if (dev == NULL)
+		return 0;
+	return dev->alt_set;
+}
+
+/*
+ * The setup() callback implements all the ep0 functionality that's not
+ * handled lower down.  CDC has a number of less-common features:
+ *
+ *  - class-specific descriptors for the control interface
+ *  - class-specific control requests
+ */
+static int
+eth_setup(struct usb_ctrlrequest *ctrl, void *buf, int len, void *_ctxt)
+{
+	struct eth_dev	*dev = (struct eth_dev *) _ctxt;
+	int		value = -EOPNOTSUPP;	/* default: request not handled */
+	u16		wIndex = le16_to_cpu(ctrl->wIndex);
+	u16		wValue = le16_to_cpu(ctrl->wValue);
+	u16		wLength = le16_to_cpu(ctrl->wLength);
+	u16		data_int = eth_data_alt_one_intf.bInterfaceNumber;
+	u16		ctrl_int = eth_control_intf.bInterfaceNumber;
+	switch (ctrl->bRequest) {
+	case USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
+			|| wLength != 0
+			|| ((wIndex != data_int) && (wIndex != ctrl_int)))
+			break;
+		DEBUG(dev, "packet filter %02x\n", wValue);
+		/* the filter is consulted on the TX path (eth_start etc.) */
+		dev->cdc_filter = wValue;
+		value = 0;
+		break;
+
+	/* and potentially:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+		VDEBUG(dev,
+			"unknown control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+	}
+	return value;
+}
+
+
+/* Cable disconnect: take the link down and drop the configuration.
+ * NOTE(review): eth_reset_config() frees requests while dev->lock is
+ * held with IRQs off here -- confirm usb_ept_free_req() is safe to
+ * call in that context.
+ */
+static void eth_disconnect(void *_ctxt)
+{
+	struct eth_dev		*dev = (struct eth_dev *) _ctxt;
+	unsigned long		flags;
+
+	printk(KERN_INFO "eth_disconnect()\n");
+	spin_lock_irqsave(&dev->lock, flags);
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+	eth_reset_config(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+/* Change MTU.  Reject sizes outside (ETH_HLEN, ETH_FRAME_LEN] and any
+ * size whose full ethernet frame would be an exact multiple of the
+ * bulk-OUT maxpacket, since a short packet is what terminates each
+ * read.  (The original tested the IN endpoint here, but the comment
+ * and rx_submit() -- which sizes its buffers from out_ep -- show the
+ * zero-length-packet concern is the receive direction.)
+ */
+static int usb_eth_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+
+	if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		return -ERANGE;
+	/* no zero-length packet read wanted after mtu-sized packets */
+	if (((new_mtu + sizeof(struct ethhdr)) %
+			(usb_ept_get_max_packet(dev->out_ep))) == 0)
+		return -EDOM;
+	net->mtu = new_mtu;
+	return 0;
+}
+
+/* net_device hook: statistics live in the private eth_dev. */
+static struct net_device_stats *eth_get_stats(struct net_device *net)
+{
+	struct eth_dev *dev = netdev_priv(net);
+
+	return &dev->stats;
+}
+
+/* ethtool hook: identify the driver behind this net_device. */
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	strlcpy(p->fw_version, "ethernet", sizeof(p->fw_version));
+	strlcpy(p->version, DRIVER_VERSION, sizeof(p->version));
+	strlcpy(p->driver, shortname, sizeof(p->driver));
+}
+
+/* ethtool hook: this function driver always reports the link as up. */
+static u32 eth_get_link(struct net_device *net)
+{
+	return 1;
+}
+
+/* Minimal ethtool hooks.  NOTE(review): could be const. */
+static struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = eth_get_link
+};
+
+/* Mark @flag pending in dev->todo and kick the workqueue once; a flag
+ * that is already pending is left for the queued work item to handle.
+ */
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
+	if (test_and_set_bit(flag, &dev->todo))
+		return;		/* already scheduled */
+	if (schedule_work(&dev->work))
+		DEBUG(dev, "kevent %d scheduled\n", flag);
+	else
+		ERROR(dev, "kevent %d may have been dropped\n", flag);
+}
+
+static void rx_complete(struct usb_endpoint *ep, struct usb_request *req);
+
+/* Allocate an skb sized to a whole number of OUT max-packets and queue
+ * it on the bulk-OUT endpoint.  On -ENOMEM the refill is deferred to
+ * keventd; on any failure the request is put back on dev->rx_reqs.
+ */
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff		*skb;
+	int			retval = -ENOMEM;
+	size_t			size;
+	unsigned long		flags;
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*max_pkt), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*max_pkt), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 */
+	size = (sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA);
+	/* round the buffer up to a multiple of the OUT max packet size */
+	size += usb_ept_get_max_packet(dev->out_ep) - 1;
+	size -= size % usb_ept_get_max_packet(dev->out_ep);
+	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+	if (skb  == NULL) {
+		DEBUG(dev, "no rx skb\n");
+		goto enomem;
+	}
+
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	req->buf = skb->data;
+	req->length = size;
+	req->complete = rx_complete;
+	req->context = skb;
+
+	retval = usb_ept_queue_xfer(dev->out_ep, req);
+	/* the label sits inside the if: the -ENOMEM path (queue failure
+	 * or skb failure) both defers a refill and recycles req below
+	 */
+	if (retval == -ENOMEM)
+enomem:
+		defer_kevent(dev, WORK_RX_MEMORY);
+	if (retval) {
+		DEBUG(dev, "rx submit --> %d\n", retval);
+		if (skb)
+			dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return retval;
+}
+
+/* Bulk-OUT completion: hand a good frame to the network stack; recycle
+ * or resubmit the request depending on completion status and link
+ * state.
+ */
+static void rx_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = eth_device;
+	int		status = req->status;
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+		/* we know MaxPacketsPerTransfer == 1 here */
+		/* NOTE(review): status is 0 in this arm, so the
+		 * "status < 0" test below can never fire; only the
+		 * frame-length checks matter
+		 */
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > ETH_FRAME_LEN) {
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			DEBUG(dev, "rx length %d\n", skb->len);
+			break;
+		}
+
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+
+		/* no buffer copies needed, unless hardware can't
+		 * use skb buffers.
+		 */
+		status = netif_rx(skb);
+		skb = NULL;
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDEBUG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DEBUG(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		dev->stats.rx_errors++;
+		DEBUG(dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+	/* park the request if the interface is down; "clean" is also the
+	 * target of the shutdown/reset paths above
+	 */
+	if (!netif_running(dev->net)) {
+clean:
+		spin_lock(&dev->req_lock);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock(&dev->req_lock);
+		req = NULL;
+	}
+	if (req)
+		rx_submit(dev, req, GFP_ATOMIC);
+}
+
+/* Ensure @list holds exactly @n usb_requests for @ep: allocate the
+ * shortfall or free the surplus.  Returns 0 on success, -ENOMEM only
+ * when nothing could be allocated at all.
+ * NOTE(review): @gfp_flags is accepted but never used; this
+ * controller's usb_ept_alloc_req() takes only a buffer size.
+ */
+static int prealloc(struct list_head *list, struct usb_endpoint *ep,
+			unsigned n, gfp_t gfp_flags)
+{
+	unsigned		i;
+	struct usb_request	*req;
+
+	if (!n)
+		return -ENOMEM;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry(req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
+	while (i--) {
+		/* CDC ECM uses skb buffer pointer for requests */
+		req = usb_ept_alloc_req(ep, 0);
+		if (!req)
+			return list_empty(list) ? -ENOMEM : 0;
+		list_add(&req->list, list);
+	}
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del(&req->list);
+		usb_ept_free_req(ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of(next, struct usb_request, list);
+	}
+	return 0;
+}
+
+/* Preallocate @n requests on each bulk endpoint's free list.
+ * Returns 0 or a negative errno; dev->req_lock guards both lists
+ * while they are (re)filled.
+ */
+static int alloc_requests(struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
+{
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	status = prealloc(&dev->tx_reqs, dev->in_ep, n, gfp_flags);
+	if (status >= 0)
+		status = prealloc(&dev->rx_reqs, dev->out_ep, n, gfp_flags);
+	if (status < 0)
+		DEBUG(dev, "can't alloc requests\n");
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	return status;
+}
+
+/* Move every idle RX request from dev->rx_reqs onto the OUT endpoint.
+ * The lock is dropped around rx_submit(), which allocates an skb and
+ * re-takes req_lock itself on failure.
+ */
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		if (rx_submit(dev, req, gfp_flags) < 0) {
+			/* out of memory: retry later from keventd */
+			defer_kevent(dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+}
+
+/* Workqueue handler: currently only replenishes the RX queue when a
+ * WORK_RX_MEMORY refill was deferred from atomic context.
+ */
+static void eth_work(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
+
+	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)
+			&& netif_running(dev->net))
+		rx_fill(dev, GFP_KERNEL);
+
+	if (dev->todo)
+		DEBUG(dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
+/* Bulk-IN completion: update stats, recycle the request and free the
+ * skb, then re-open the TX queue if the link is still up.
+ * NOTE(review): tx_packets is incremented even for error completions;
+ * confirm this mirrors the historical gadget ether driver on purpose.
+ */
+static void tx_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = eth_device;
+
+	switch (req->status) {
+	default:
+		dev->stats.tx_errors++;
+		VDEBUG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		dev->stats.tx_bytes += skb->len;
+	}
+	dev->stats.tx_packets++;
+
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->tx_reqs);
+	spin_unlock(&dev->req_lock);
+	dev_kfree_skb_any(skb);
+
+	atomic_dec(&dev->tx_qlen);
+	if (netif_carrier_ok(dev->net))
+		netif_wake_queue(dev->net);
+}
+
+/* Nonzero when the host enabled promiscuous CDC packet filtering. */
+static inline int eth_is_promisc(struct eth_dev *dev)
+{
+	unsigned filter = dev->cdc_filter;
+
+	return filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+/* hard_start_xmit: queue one skb on the bulk-IN endpoint.  Returns 0
+ * when the packet was consumed (or deliberately dropped), or 1 -- the
+ * NETDEV_TX_BUSY convention -- when no free request is available.
+ */
+static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			length = skb->len;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+
+	/* apply outgoing CDC filters */
+	if (!eth_is_promisc(dev)) {
+		u8		*dest = skb->data;
+
+		if (is_multicast_ether_addr(dest)) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (is_broadcast_ether_addr(dest))
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(dev->cdc_filter & type)) {
+				/* filtered out: drop silently, report ok */
+				dev_kfree_skb_any(skb);
+				return 0;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	/*
+	 * this freelist can be empty if an interrupt triggered disconnect()
+	 * and reconfigured the function (shutting down this queue) after the
+	 * network stack decided to xmit but before we got the spinlock.
+	 */
+	if (list_empty(&dev->tx_reqs)) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		return 1;
+	}
+
+	req = container_of(dev->tx_reqs.next, struct usb_request, list);
+	list_del(&req->list);
+
+	/* temporarily stop TX queue when the freelist empties */
+	if (list_empty(&dev->tx_reqs))
+		netif_stop_queue(net);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 */
+	req->buf = skb->data;
+	req->context = skb;
+	req->complete = tx_complete;
+
+	/* use zlp framing on tx for strict CDC-Ether conformance,
+	 * though any robust network rx path ignores extra padding.
+	 * and some hardware doesn't like to write zlps.
+	 */
+	/* NOTE(review): padding by one byte assumes the skb buffer has a
+	 * spare byte past skb->len -- confirm tailroom is guaranteed
+	 */
+	if (!dev->zlp && (length % usb_ept_get_max_packet(dev->in_ep)) == 0)
+		length++;
+
+	req->length = length;
+
+	retval = usb_ept_queue_xfer(dev->in_ep, req);
+	switch (retval) {
+	default:
+		DEBUG(dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+		net->trans_start = jiffies;
+		atomic_inc(&dev->tx_qlen);
+	}
+	if (retval) {
+		/* queue failed: drop the packet, recycle the request */
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (list_empty(&dev->tx_reqs))
+			netif_start_queue(net);
+		list_add(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return 0;
+}
+
+
+/* Begin I/O after (re)configuration: prime RX and release the TX
+ * queue.
+ */
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	DEBUG(dev, "%s\n", __func__);
+
+	rx_fill(dev, gfp_flags);	/* queue every idle RX request */
+
+	atomic_set(&dev->tx_qlen, 0);	/* nothing in flight yet */
+	netif_wake_queue(dev->net);
+}
+
+/* net_device open(): carrier tracks the USB DATA interface, so only
+ * start I/O if the host has already activated it.
+ */
+static int eth_open(struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	DEBUG(dev, "%s\n", __func__);
+	if (netif_carrier_ok(dev->net))
+		eth_start(dev, GFP_KERNEL);
+	return 0;
+}
+
+/* net_device stop(): halt the TX queue and quiesce the endpoints.
+ * The disable/re-enable dance reclaims requests still owned by the
+ * controller while leaving the interface usable if the host keeps it
+ * configured.
+ */
+static int eth_stop(struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	VDEBUG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	DEBUG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->stats.rx_packets, dev->stats.tx_packets,
+		dev->stats.rx_errors, dev->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	if (dev->configured) {
+		usb_ept_enable(dev->in_ep, 0);
+		usb_ept_enable(dev->out_ep, 0);
+		if (netif_carrier_ok(dev->net)) {
+			DEBUG(dev, "host still using in/out endpoints\n");
+			/* FIXME idiom may leave toggle wrong here */
+			usb_ept_enable(dev->in_ep, 1);
+			usb_ept_enable(dev->out_ep, 1);
+		}
+		if (dev->status_ep) {
+			usb_ept_enable(dev->status_ep, 0);
+			usb_ept_enable(dev->status_ep,  1);
+		}
+	}
+
+	return 0;
+}
+
+
+/* Convert one hex digit to its value; non-hex characters map to 0. */
+static u8 __devinit nibble(unsigned char c)
+{
+	if (c >= '0' && c <= '9')
+		return c - '0';
+	c = toupper(c);
+	if (c >= 'A' && c <= 'F')
+		return c - 'A' + 10;
+	return 0;
+}
+
+/* Parse a MAC address string (pairs of hex digits, ':' or '.'
+ * separators) into @dev_addr.  Falls back to a random address --
+ * returning 1 so the caller can warn -- when @str is NULL or does not
+ * form a valid unicast address.
+ * NOTE(review): nibble() maps bad characters to 0, so malformed input
+ * is only caught by the is_valid_ether_addr() check.
+ */
+static int __devinit get_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = nibble(*str++) << 4;
+			num |= (nibble(*str++));
+			dev_addr[i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	random_ether_addr(dev_addr);
+	return 1;
+}
+
+/* Flush, disable and release one endpoint; NULL is ignored. */
+static void eth_free_endpoint(struct usb_endpoint *ept)
+{
+	if (!ept)
+		return;
+	usb_ept_fifo_flush(ept);
+	usb_ept_enable(ept, 0);
+	usb_free_endpoint(ept);
+}
+
+/* Undo eth_bind(): release the endpoints and unregister the net
+ * device.  A no-op unless bind previously completed.
+ */
+static void eth_unbind(void *_ctxt)
+{
+	struct eth_dev *dev = (struct eth_dev *)_ctxt;
+
+	pr_debug("%s ()\n", __func__);
+	if (!dev || !eth_bound)
+		return;
+
+	eth_free_endpoint(dev->in_ep);
+	eth_free_endpoint(dev->out_ep);
+	eth_free_endpoint(dev->status_ep);
+
+	if (dev->net) {
+		unregister_netdev(dev->net);
+		free_netdev(dev->net);
+	}
+	eth_bound = 0;
+}
+
+/* One-time function bind: claim interface numbers and string ids, fill
+ * in descriptors and endpoints, then register the net device.
+ * NOTE(review): several error paths look suspect -- see inline notes.
+ */
+static void  eth_bind(void *_ctxt)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	u8			zlp = 1;
+	struct usb_endpoint     *in_ep, *out_ep, *status_ep = NULL;
+	int			status = -ENOMEM;
+	int			ret;
+	struct device		*get_dev;
+
+	get_dev = usb_get_device();
+
+	/* communications (control) interface + IAD/union bookkeeping */
+	ret = usb_msm_get_next_ifc_number(&usb_func_ether);
+	eth_control_intf.bInterfaceNumber = ret;
+	eth_control_intf.iInterface = string_control;
+	eth_IAD.bFirstInterface = ret;
+	eth_union_desc.bMasterInterface0 = ret;
+
+	/* data interface: alt 0 (idle) and alt 1 (bulk pair active) */
+	ret = usb_msm_get_next_ifc_number(&usb_func_ether);
+	eth_data_alt_zero_intf.bInterfaceNumber = ret;
+	eth_data_alt_zero_intf.iInterface = 0;
+	eth_data_alt_one_intf.bInterfaceNumber = ret;
+	eth_data_alt_one_intf.iInterface = string_data;
+	eth_union_desc.bSlaveInterface0 = ret;
+
+	/* Enable IAD */
+	usb_msm_enable_iad();
+
+	/* Configuring STATUS endpoint */
+	/* NOTE(review): the usb_alloc_endpoint() results below are
+	 * dereferenced without NULL checks
+	 */
+	status_ep = usb_alloc_endpoint(USB_DIR_IN);
+	status_ep->max_pkt = 64;
+
+	eth_control_intf_hs_int_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | status_ep->num;
+	eth_control_intf_hs_int_in_ep_desc.wMaxPacketSize =
+						status_ep->max_pkt;
+	eth_control_intf_fs_int_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | status_ep->num;
+	/* NOTE(review): bInterval is set only on the HS descriptor; the
+	 * FS interrupt descriptor keeps its static value -- confirm
+	 */
+	eth_control_intf_hs_int_in_ep_desc.bInterval = 4;
+
+	/* Configuring OUT endpoint */
+	out_ep = usb_alloc_endpoint(USB_DIR_OUT);
+	out_ep->max_pkt = 512;
+	eth_data_intf_hs_bulk_out_ep_desc.bEndpointAddress =
+						USB_DIR_OUT | out_ep->num;
+	eth_data_intf_hs_bulk_out_ep_desc.wMaxPacketSize = out_ep->max_pkt;
+	eth_data_intf_fs_bulk_out_ep_desc.bEndpointAddress =
+						USB_DIR_OUT | out_ep->num;
+
+	/*Configuring IN Endpoint*/
+	in_ep = usb_alloc_endpoint(USB_DIR_IN);
+	in_ep->max_pkt = 512;
+	eth_data_intf_hs_bulk_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | in_ep->num;
+	eth_data_intf_hs_bulk_in_ep_desc.wMaxPacketSize = in_ep->max_pkt;
+	eth_data_intf_fs_bulk_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | in_ep->num;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net) {
+		/* NOTE(review): endpoints allocated above are leaked on
+		 * this early return -- confirm
+		 */
+		printk(KERN_DEBUG "eth_bind: alloc_etherdev failed \n");
+		return ;
+	}
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+
+	/* network device setup */
+	dev->net = net;
+	strcpy(net->name, "usb%d");
+	dev->zlp = zlp;
+	dev->in_ep = in_ep;
+	dev->out_ep = out_ep;
+	dev->status_ep = status_ep;
+
+	eth_device = dev;
+	usb_func_ether.context = eth_device;
+
+	/* Module params for these addresses should come from ID proms.
+	 * The host side address is used with CDC, and commonly
+	 * ends up in a persistent config database.  It's not clear if
+	 * host side code for the SAFE thing cares -- its original BLAN
+	 * thing didn't, Sharp never assigned those addresses on Zaurii.
+	 */
+	if (get_ether_addr(dev_addr, net->dev_addr))
+		dev_warn(get_dev,
+			"using random %s ethernet address\n", "self");
+	if (get_ether_addr(host_addr, dev->host_mac))
+		dev_warn(get_dev,
+			"using random %s ethernet address\n", "host");
+	snprintf(ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
+		dev->host_mac[0], dev->host_mac[1],
+		dev->host_mac[2], dev->host_mac[3],
+		dev->host_mac[4], dev->host_mac[5]);
+
+	net->change_mtu = usb_eth_change_mtu;
+	net->get_stats = eth_get_stats;
+	net->hard_start_xmit = eth_start_xmit;
+	net->open = eth_open;
+	net->stop = eth_stop;
+	/* watchdog_timeo, tx_timeout ...
+	 * set_multicast_list */
+	SET_ETHTOOL_OPS(net, &ops);
+	/* ... and maybe likewise for status transfer */
+	if (dev->status_ep) {
+		dev->stat_req = usb_ept_alloc_req(dev->status_ep,
+					STATUS_BYTECOUNT);
+		if (!dev->stat_req) {
+			/* NOTE(review): frees dev->req, but stat_req just
+			 * failed -- looks like a typo for dev->stat_req;
+			 * confirm what dev->req holds here
+			 */
+			usb_ept_free_req(dev->status_ep, dev->req);
+			goto fail;
+		}
+		dev->stat_req->context = NULL;
+	}
+	/* finish hookup to lower layer ... */
+	/* two kinds of host-initiated state changes:
+	 *  - iff DATA transfer is active, carrier is "on"
+	 *  - tx queueing enabled if open *and* carrier is "on"
+	 */
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	SET_NETDEV_DEV(dev->net, get_dev);
+	status = register_netdev(dev->net);
+	if (status < 0)
+		goto fail1;
+
+	INFO(dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
+	INFO(dev, "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		net->dev_addr[0], net->dev_addr[1],
+		net->dev_addr[2], net->dev_addr[3],
+		net->dev_addr[4], net->dev_addr[5]);
+
+	INFO(dev, "HOST MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		dev->host_mac[0], dev->host_mac[1],
+		dev->host_mac[2], dev->host_mac[3],
+		dev->host_mac[4], dev->host_mac[5]);
+
+	/* allocate string descriptor ids; all three must succeed */
+	string_data = usb_msm_get_next_strdesc_id("Ethernet Data");
+	if (string_data != 0) {
+		string_control = usb_msm_get_next_strdesc_id
+				 ("CDC Communications Control");
+		if (string_control != 0) {
+			string_ethaddr = usb_msm_get_next_strdesc_id(ethaddr);
+			if (string_ethaddr != 0) {
+				eth_ether_desc.iMACAddress = string_ethaddr;
+				eth_bound = 1;
+				return ;
+			}
+		}
+	}
+fail1:
+	dev_dbg(get_dev, "register_netdev failed, %d\n", status);
+fail:
+	/* NOTE(review): eth_bound is set even on failure, which lets
+	 * eth_unbind() run its cleanup later -- confirm this is intended
+	 */
+	eth_bound = 1;
+	printk(KERN_INFO"eth_bind: returning from eth_bind\n");
+	return ;
+}
+
+
+/* Hooks registered with the USB function framework for CDC ECM. */
+static struct usb_function usb_func_ether = {
+	.name		= "ethernet",
+	.bind		= eth_bind,
+	.unbind		= eth_unbind,
+	.configure	= eth_configure,
+	.disconnect	= eth_disconnect,
+	.setup		= eth_setup,
+	.set_interface	= eth_set_interface,
+	.get_interface	= eth_get_interface,
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+
+#define TOTAL_ETH_DESCRIPTORS 11
+/* NULL-terminated descriptor tables, filled in init().
+ * NOTE(review): only used in this file -- could be static.
+ */
+struct usb_descriptor_header *eth_hs_descriptors[TOTAL_ETH_DESCRIPTORS];
+struct usb_descriptor_header *eth_fs_descriptors[TOTAL_ETH_DESCRIPTORS];
+
+/* Module init: lay out the high- and full-speed descriptor tables
+ * (identical except for the endpoint descriptors) and register the
+ * ethernet function.
+ */
+static int __init init(void)
+{
+	int rc;
+
+	eth_hs_descriptors[0] = (struct usb_descriptor_header *)
+				&eth_IAD;
+	eth_hs_descriptors[1] = (struct usb_descriptor_header *)
+				&eth_control_intf;
+	eth_hs_descriptors[2] = (struct usb_descriptor_header *)
+				&eth_header_desc;
+	eth_hs_descriptors[3] = (struct usb_descriptor_header *)
+				&eth_union_desc;
+	eth_hs_descriptors[4] = (struct usb_descriptor_header *)
+				&eth_ether_desc;
+	eth_hs_descriptors[5] = (struct usb_descriptor_header *)
+				&eth_control_intf_hs_int_in_ep_desc;
+	eth_hs_descriptors[6] = (struct usb_descriptor_header *)
+				&eth_data_alt_zero_intf;
+	eth_hs_descriptors[7] = (struct usb_descriptor_header *)
+				&eth_data_alt_one_intf;
+	eth_hs_descriptors[8] = (struct usb_descriptor_header *)
+				&eth_data_intf_hs_bulk_out_ep_desc;
+	eth_hs_descriptors[9] = (struct usb_descriptor_header *)
+				&eth_data_intf_hs_bulk_in_ep_desc;
+	eth_hs_descriptors[10] = NULL;
+
+	eth_fs_descriptors[0] = (struct usb_descriptor_header *)&eth_IAD;
+	eth_fs_descriptors[1] = (struct usb_descriptor_header *)
+				&eth_control_intf;
+	eth_fs_descriptors[2] = (struct usb_descriptor_header *)
+				&eth_header_desc;
+	eth_fs_descriptors[3] = (struct usb_descriptor_header *)&eth_union_desc;
+	eth_fs_descriptors[4] = (struct usb_descriptor_header *)&eth_ether_desc;
+	eth_fs_descriptors[5] = (struct usb_descriptor_header *)
+				&eth_control_intf_fs_int_in_ep_desc;
+	eth_fs_descriptors[6] = (struct usb_descriptor_header *)
+				&eth_data_alt_zero_intf;
+	eth_fs_descriptors[7] = (struct usb_descriptor_header *)
+				&eth_data_alt_one_intf;
+	eth_fs_descriptors[8] = (struct usb_descriptor_header *)
+				&eth_data_intf_fs_bulk_out_ep_desc;
+	eth_fs_descriptors[9] = (struct usb_descriptor_header *)
+				&eth_data_intf_fs_bulk_in_ep_desc;
+	eth_fs_descriptors[10] = NULL;
+
+	usb_func_ether.hs_descriptors = eth_hs_descriptors;
+	usb_func_ether.fs_descriptors = eth_fs_descriptors;
+	rc = usb_function_register(&usb_func_ether);
+
+	if (rc < 0)
+		printk(KERN_INFO "cdcecm init:usb function register failed \n");
+	return rc;
+}
+module_init(init);
+
+/* Module exit: unregister the function driver.
+ * NOTE(review): clearing dev->net and the local pointer frees
+ * nothing; the real teardown happens in eth_unbind().
+ */
+static void __exit eth_cleanup(void)
+{
+	struct eth_dev          *dev = eth_device;
+
+	usb_function_unregister(&usb_func_ether);
+	if (dev) {
+		dev->net = NULL;
+		dev = NULL;
+	}
+}
+module_exit(eth_cleanup);
diff --git a/drivers/usb/function/loopback.c b/drivers/usb/function/loopback.c
new file mode 100644
index 0000000..d7c93a3
--- /dev/null
+++ b/drivers/usb/function/loopback.c
@@ -0,0 +1,128 @@
+/* drivers/usb/function/loopback.c
+ *
+ * Simple Loopback Function Device
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+/* Single static instance: one OUT request to receive a packet and one
+ * IN request to echo it back.
+ */
+struct loopback_context
+{
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+	struct usb_request *req_out;
+	struct usb_request *req_in;
+};
+
+static struct loopback_context _context;
+
+/* Endpoint-assignment callback: ept[] arrives in ifc_ept_type order
+ * (BULK_OUT then BULK_IN).  NOTE(review): the usb_ept_alloc_req()
+ * return values are not checked for NULL.
+ */
+static void loopback_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct loopback_context *ctxt = _ctxt;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	printk(KERN_INFO "loopback_bind() %p, %p\n", ctxt->out, ctxt->in);
+
+	ctxt->req_out = usb_ept_alloc_req(ctxt->out, 4096);
+	ctxt->req_in = usb_ept_alloc_req(ctxt->in, 4096);
+}
+
+static void loopback_queue_in(struct loopback_context *ctxt, void *data, unsigned len);
+static void loopback_queue_out(struct loopback_context *ctxt);
+
+/* Bulk-IN completion: the echoed packet reached the host, so arm the
+ * next OUT read.  (Bug fix: the log tag previously said
+ * "loopback_out_complete" -- swapped with the sibling handler.)
+ */
+static void loopback_in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct loopback_context *ctxt = req->context;
+	printk(KERN_INFO "loopback_in_complete (%d)\n", req->actual);
+	loopback_queue_out(ctxt);
+}
+
+/* Bulk-OUT completion: echo the received bytes back on success,
+ * otherwise just re-arm the read.  (Bug fix: the log tag previously
+ * said "loopback_in_complete" -- swapped with the sibling handler.)
+ */
+static void loopback_out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct loopback_context *ctxt = req->context;
+	printk(KERN_INFO "loopback_out_complete (%d)\n", req->actual);
+
+	if (req->status == 0) {
+		loopback_queue_in(ctxt, req->buf, req->actual);
+	} else {
+		loopback_queue_out(ctxt);
+	}
+}
+
+/* (Re)arm the 4KB bulk-OUT read for the next host packet. */
+static void loopback_queue_out(struct loopback_context *ctxt)
+{
+	struct usb_request *req = ctxt->req_out;
+
+	req->length = 4096;
+	req->context = ctxt;
+	req->complete = loopback_out_complete;
+
+	usb_ept_queue_xfer(ctxt->out, req);
+}
+
+/* Echo @len bytes back to the host on the bulk-IN endpoint. */
+static void loopback_queue_in(struct loopback_context *ctxt, void *data, unsigned len)
+{
+	struct usb_request *req = ctxt->req_in;
+
+	req->length = len;
+	req->context = ctxt;
+	req->complete = loopback_in_complete;
+	memcpy(req->buf, data, len);
+
+	usb_ept_queue_xfer(ctxt->in, req);
+}
+
+/* Start echoing once the host selects a configuration; on
+ * deconfigure the controller cancels any pending requests for us.
+ */
+static void loopback_configure(int configured, void *_ctxt)
+{
+	struct loopback_context *ctxt = _ctxt;
+
+	printk(KERN_INFO "loopback_configure() %d\n", configured);
+	if (!configured)
+		return;		/* all pending requests will be canceled */
+	loopback_queue_out(ctxt);
+}
+
+/* Loopback function: one vendor-specific (0xff/0xff/0xff) interface
+ * with a bulk OUT/IN pair, allocated in ifc_ept_type order.
+ */
+static struct usb_function usb_func_loopback = {
+	.bind = loopback_bind,
+	.configure = loopback_configure,
+
+	.name = "loopback",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xff,
+	.ifc_protocol = 0xff,
+
+	.ifc_name = "loopback",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+/* Register the loopback function with the USB function framework. */
+static int __init loopback_init(void)
+{
+	printk(KERN_INFO "loopback_init()\n");
+	return usb_function_register(&usb_func_loopback);
+}
+
+module_init(loopback_init);
diff --git a/drivers/usb/function/mass_storage.c b/drivers/usb/function/mass_storage.c
new file mode 100644
index 0000000..f679cd0
--- /dev/null
+++ b/drivers/usb/function/mass_storage.c
@@ -0,0 +1,3009 @@
+/* drivers/usb/function/mass_storage.c
+ *
+ * Function Driver for USB Mass Storage
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * Based heavily on the file_storage gadget driver in
+ * drivers/usb/gadget/file_storage.c and licensed under the same terms:
+ *
+ * Copyright (C) 2003-2007 Alan Stern
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define DEBUG
+//#define VERBOSE_DEBUG
+//#define DUMP_MSGS
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/switch.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/mass_storage_function.h>
+#include <linux/usb_usual.h>
+#include <linux/platform_device.h>
+#include <linux/wakelock.h>
+
+#include "usb_function.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_NAME		"usb_mass_storage"
+#define MAX_LUNS		8
+
+#ifdef DEBUG
+#define LDBG(lun, fmt, args...) \
+	dev_dbg(&(lun)->dev , fmt , ## args)
+#define MDBG(fmt,args...) \
+	printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
+#else
+#define LDBG(lun, fmt, args...) \
+	do { } while (0)
+#define MDBG(fmt,args...) \
+	do { } while (0)
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG	LDBG
+#else
+#define VLDBG(lun, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LERROR(lun, fmt, args...) \
+	dev_err(&(lun)->dev , fmt , ## args)
+#define LWARN(lun, fmt, args...) \
+	dev_warn(&(lun)->dev , fmt , ## args)
+#define LINFO(lun, fmt, args...) \
+	dev_info(&(lun)->dev , fmt , ## args)
+
+#define MINFO(fmt,args...) \
+	printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
+
+#define DBG(d, fmt, args...) \
+	dev_dbg(&(d)->pdev->dev , fmt , ## args)
+#define VDBG(d, fmt, args...) \
+	dev_vdbg(&(d)->pdev->dev , fmt , ## args)
+#define ERROR(d, fmt, args...) \
+	dev_err(&(d)->pdev->dev , fmt , ## args)
+#define MS_WARN(d, fmt, args...) \
+	dev_warn(&(d)->pdev->dev , fmt , ## args)
+#define INFO(d, fmt, args...) \
+	dev_info(&(d)->pdev->dev , fmt , ## args)
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk-only data structures */
+
+/* Command Block Wrapper */
+/* Command Block Wrapper */
+struct bulk_cb_wrap {
+	__le32	Signature;		/* Contains 'USBC' */
+	u32	Tag;			/* Unique per command id */
+	__le32	DataTransferLength;	/* Size of the data */
+	u8	Flags;			/* Direction in bit 7 */
+	u8	Lun;			/* LUN (normally 0) */
+	u8	Length;			/* Of the CDB, <= MAX_COMMAND_SIZE */
+	u8	CDB[16];		/* Command Data Block */
+};
+
+/* NOTE(review): Tag is plain u32, not __le32 -- presumably because it
+ * is echoed back to the host unchanged; confirm against file_storage.c
+ */
+#define USB_BULK_CB_WRAP_LEN	31
+#define USB_BULK_CB_SIG		0x43425355	/* Spells out USBC */
+#define USB_BULK_IN_FLAG	0x80
+
+/* Command Status Wrapper */
+/* Command Status Wrapper */
+struct bulk_cs_wrap {
+	__le32	Signature;		/* Should = 'USBS' */
+	u32	Tag;			/* Same as original command */
+	__le32	Residue;		/* Amount not transferred */
+	u8	Status;			/* See below */
+};
+
+#define USB_BULK_CS_WRAP_LEN	13
+#define USB_BULK_CS_SIG		0x53425355	/* Spells out 'USBS' */
+/* CSW status codes */
+#define USB_STATUS_PASS		0
+#define USB_STATUS_FAIL		1
+#define USB_STATUS_PHASE_ERROR	2
+
+/* Bulk-only class specific requests */
+#define USB_BULK_RESET_REQUEST		0xff
+#define USB_BULK_GET_MAX_LUN_REQUEST	0xfe
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE	16
+
+/* SCSI commands that we recognize */
+#define SC_FORMAT_UNIT			0x04
+#define SC_INQUIRY			0x12
+#define SC_MODE_SELECT_6		0x15
+#define SC_MODE_SELECT_10		0x55
+#define SC_MODE_SENSE_6			0x1a
+#define SC_MODE_SENSE_10		0x5a
+#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL	0x1e
+#define SC_READ_6			0x08
+#define SC_READ_10			0x28
+#define SC_READ_12			0xa8
+#define SC_READ_CAPACITY		0x25
+#define SC_READ_FORMAT_CAPACITIES	0x23
+#define SC_RELEASE			0x17
+#define SC_REQUEST_SENSE		0x03
+#define SC_RESERVE			0x16
+#define SC_SEND_DIAGNOSTIC		0x1d
+#define SC_START_STOP_UNIT		0x1b
+#define SC_SYNCHRONIZE_CACHE		0x35
+#define SC_TEST_UNIT_READY		0x00
+#define SC_VERIFY			0x2f
+#define SC_WRITE_6			0x0a
+#define SC_WRITE_10			0x2a
+#define SC_WRITE_12			0xaa
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	/* Sense Key byte, etc. */
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Per-LUN state: backing file, geometry and SCSI sense bookkeeping. */
+struct lun {
+	struct file	*filp;
+	loff_t		file_length;
+	loff_t		num_sectors;
+
+	unsigned int	ro : 1;
+	unsigned int	prevent_medium_removal : 1;
+	unsigned int	registered : 1;
+	unsigned int	info_valid : 1;
+
+	/* pending sense data, reported via SCSI REQUEST SENSE */
+	u32		sense_data;
+	u32		sense_data_info;
+	u32		unit_attention_data;
+
+	struct device	dev;
+};
+
+#define backing_file_is_open(curlun)	((curlun)->filp != NULL)
+
+
+/* Recover the enclosing struct lun from its embedded struct device. */
+static struct lun *dev_to_lun(struct device *dev)
+{
+	return container_of(dev, struct lun, dev);
+}
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
+
+/* Number of buffers for CBW, DATA and CSW */
+#ifdef CONFIG_USB_CSW_HACK
+#define NUM_BUFFERS	4
+#else
+#define NUM_BUFFERS	2
+#endif
+
+/* Lifecycle of one pipelined transfer buffer. */
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
+
+/* One element of the buffer pipeline between USB and backing storage. */
+struct fsg_buffhd {
+	void				*buf;
+	enum fsg_buffer_state		state;
+	struct fsg_buffhd		*next;
+
+	/* The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here. */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	int				inreq_busy;
+	struct usb_request		*outreq;
+	int				outreq_busy;
+};
+
+enum fsg_state {
+	/* This one isn't used anywhere */
+	FSG_STATE_COMMAND_PHASE = -10,
+
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+int can_stall = 1;
+
+/* Top-level driver state: endpoints, buffer ring, worker-thread plumbing,
+ * the current command being executed, and the exported LUN array. */
+struct fsg_dev {
+	/* lock protects: state and all the req_busy's */
+	spinlock_t		lock;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* reference counting: wait until all LUNs are released */
+	struct kref		ref;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		/* For exception handling */
+
+	u8			config, new_config;
+
+	unsigned int		running : 1;
+	unsigned int		phase_error : 1;
+	unsigned int		short_packet_received : 1;
+	unsigned int		bad_lun_okay : 1;
+
+	/* Bit numbers for atomic_bitflags */
+	unsigned long		atomic_bitflags;
+#define REGISTERED		0
+#define CLEAR_BULK_HALTS	1
+#define SUSPENDED		2
+
+	struct usb_endpoint		*bulk_in;
+	struct usb_endpoint		*bulk_out;
+
+	/* Ring of NUM_BUFFERS transfer buffers; fill and drain cursors */
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	buffhds[NUM_BUFFERS];
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	/* The CBW currently being processed */
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	unsigned int		lun;
+	u32			residue;
+	u32			usb_amount_left;
+
+	unsigned int		nluns;
+	struct lun		*luns;
+	struct lun		*curlun;
+
+	u32				buf_size;
+	const char		*vendor;
+	const char		*product;
+	int				release;
+
+	struct platform_device *pdev;
+	struct switch_dev sdev;
+	int	bound;
+	struct wake_lock wake_lock, wake_lock_idle;
+};
+static int send_status(struct fsg_dev *fsg);
+
+/* True while an exception (abort, reset, config change, ...) is pending;
+ * all exception states compare greater than FSG_STATE_IDLE. */
+static int exception_in_progress(struct fsg_dev *fsg)
+{
+	return fsg->state > FSG_STATE_IDLE;
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+/* Record the caller's true bulk-out length in the buffer head, then pad
+ * the request length up to a multiple of the endpoint's maxpacket size. */
+static void set_bulk_out_req_length(struct fsg_dev *fsg,
+		struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	partial = length % fsg->bulk_out_maxpacket;
+
+	bh->bulk_out_intended_length = length;
+	if (partial)
+		length += fsg->bulk_out_maxpacket - partial;
+	bh->outreq->length = length;
+}
+
+static struct fsg_dev			*the_fsg;
+
+static void	close_backing_file(struct fsg_dev *fsg, struct lun *curlun);
+static void	close_all_backing_files(struct fsg_dev *fsg);
+
+
+static struct usb_function		fsg_function;
+/*-------------------------------------------------------------------------*/
+
+/* Debug hex-dump helpers: real implementations only when DUMP_MSGS (data)
+ * or VERBOSE_DEBUG (CDBs) is defined; otherwise compiled to empty stubs. */
+#ifdef DUMP_MSGS
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{
+	/* Skip oversized payloads to keep the log readable */
+	if (length < 512) {
+		DBG(fsg, "%s, length %u:\n", label, length);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+				16, 1, buf, length, 0);
+	}
+}
+
+static void dump_cdb(struct fsg_dev *fsg)
+{}
+
+#else
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{}
+
+#ifdef VERBOSE_DEBUG
+
+static void dump_cdb(struct fsg_dev *fsg)
+{
+	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
+			16, 1, fsg->cmnd, fsg->cmnd_size, 0);
+}
+
+#else
+
+static void dump_cdb(struct fsg_dev *fsg)
+{}
+
+#endif /* VERBOSE_DEBUG */
+#endif /* DUMP_MSGS */
+
+/* Halt one of our two bulk endpoints, logging which one.
+ * Returns -1 if ep is neither bulk endpoint. */
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_endpoint *ep)
+{
+	const char  *name;
+
+	if (ep != fsg->bulk_in && ep != fsg->bulk_out)
+		return -1;
+	name = (ep == fsg->bulk_in) ? "bulk-in" : "bulk-out";
+
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ept_set_halt(ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Routines for unaligned data access */
+
+/* Read a big-endian 16-bit value from a possibly unaligned buffer */
+static u16 get_be16(u8 *buf)
+{
+	return (u16) ((buf[0] << 8) | buf[1]);
+}
+
+/* Read a big-endian 32-bit value from a possibly unaligned buffer */
+static u32 get_be32(u8 *buf)
+{
+	u32	val = buf[0];
+
+	val = (val << 8) | buf[1];
+	val = (val << 8) | buf[2];
+	val = (val << 8) | buf[3];
+	return val;
+}
+
+/* Store a 16-bit value big-endian into a possibly unaligned buffer */
+static void put_be16(u8 *buf, u16 val)
+{
+	buf[0] = (u8) (val >> 8);
+	buf[1] = (u8) val;
+}
+
+/* Store a 32-bit value big-endian into a possibly unaligned buffer.
+ * Assignment to u8 truncates, so no explicit masking is required. */
+static void put_be32(u8 *buf, u32 val)
+{
+	buf[0] = (u8) (val >> 24);
+	buf[1] = (u8) (val >> 16);
+	buf[2] = (u8) (val >> 8);
+	buf[3] = (u8) val;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* There is only one interface. */
+#define USB_SC_SCSI     0x06            /* Transparent SCSI */
+#define USB_PR_BULK     0x50            /* Bulk-only */
+static struct usb_interface_descriptor
+intf_desc = {
+	.bLength 		= sizeof intf_desc,
+	.bDescriptorType 	= USB_DT_INTERFACE,
+	.bNumEndpoints 		= 2,
+	.bInterfaceClass 	= USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass 	= USB_SC_SCSI,
+	.bInterfaceProtocol 	= USB_PR_BULK,
+};
+
+
+static struct usb_endpoint_descriptor
+hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor
+fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor
+fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+
+static struct usb_descriptor_header *hs_function[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+static struct usb_descriptor_header *fs_function[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+};
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_dev *fsg)
+{
+	/* Tell the main thread that something has happened */
+	fsg->thread_wakeup_needed = 1;
+	/* thread_task may be NULL before the worker thread starts or after
+	 * it exits, so guard the wakeup */
+	if (fsg->thread_task)
+		wake_up_process(fsg->thread_task);
+}
+
+
+/* Request an exception (reset, config change, exit, ...).  Safe to call
+ * from interrupt context: uses irqsave locking and signals the worker
+ * thread rather than doing any work here. */
+static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
+{
+	unsigned long		flags;
+
+	DBG(fsg, "raise_exception %d\n", (int)new_state);
+	/* Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal. */
+	spin_lock_irqsave(&fsg->lock, flags);
+	if (fsg->state <= new_state) {
+		fsg->state = new_state;
+		if (fsg->thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+					fsg->thread_task);
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
+/* Completion handler for bulk-in requests; runs in_irq.
+ * On success the buffer is returned to the EMPTY state and the worker
+ * thread is woken; on error only the busy flag is cleared.
+ * Fix: the error path used to clear bh->inreq_busy without holding
+ * fsg->lock, although the lock is documented to protect all req_busy
+ * flags — both paths now update it under the lock. */
+static void bulk_in_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	struct fsg_buffhd	*bh = req->context;
+	unsigned long		flags;
+
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual, req->length);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock_irqsave(&fsg->lock, flags);
+	bh->inreq_busy = 0;
+	if (req->status == 0) {
+		bh->state = BUF_STATE_EMPTY;
+		wakeup_thread(fsg);
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+/* Completion handler for bulk-out requests; runs in_irq.
+ * On success the buffer becomes FULL and the worker thread is woken;
+ * on error only the busy flag is cleared.
+ * Fix: the error path used to clear bh->outreq_busy without holding
+ * fsg->lock, although the lock is documented to protect all req_busy
+ * flags — both paths now update it under the lock. */
+static void bulk_out_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	struct fsg_buffhd	*bh = req->context;
+	unsigned long		flags;
+
+	dump_msg(fsg, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual,
+				bh->bulk_out_intended_length);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock_irqsave(&fsg->lock, flags);
+	bh->outreq_busy = 0;
+	if (req->status == 0) {
+		bh->state = BUF_STATE_FULL;
+		wakeup_thread(fsg);
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+/* ep0 setup handler: services the two Bulk-Only Transport class requests
+ * (Bulk-Only Mass Storage Reset, Get Max LUN) on our interface.
+ * Returns the number of response bytes written into buf, 0 for a
+ * successful no-data request, or a negative errno. */
+static int fsg_setup(struct usb_ctrlrequest *ctrl, void *buf,
+			int len, void *context)
+{
+	struct fsg_dev		*fsg = context;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* Ignore requests before we are configured or aimed elsewhere */
+	if (!fsg->config)
+		return value;
+
+	if (w_index != intf_desc.bInterfaceNumber)
+		return value;
+
+	/* Handle Bulk-only class-specific requests */
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		switch (ctrl->bRequest) {
+		case USB_BULK_RESET_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_OUT |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_value != 0) {
+				value = -EDOM;
+				break;
+			}
+
+			/* Raise an exception to stop the current operation
+			 * and reinitialize our state. */
+			/* NOTE(review): unlike mainline file_storage.c no
+			 * raise_exception(FSG_STATE_RESET) happens here —
+			 * presumably the reset is handled elsewhere in this
+			 * driver; confirm */
+			DBG(fsg, "bulk reset request\n");
+			value = 0;
+			break;
+
+		case USB_BULK_GET_MAX_LUN_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_IN |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_value != 0) {
+				value = -EDOM;
+				break;
+			}
+			VDBG(fsg, "get max LUN\n");
+			/* Response is the highest LUN index, not the count */
+			*(u8 *) buf = fsg->nluns - 1;
+			value = 1;
+			break;
+		}
+	}
+
+	if (value == -EOPNOTSUPP)
+		VDBG(fsg,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			le16_to_cpu(ctrl->wValue), w_index, w_length);
+	return value;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+/* Mark a buffer busy and queue its request on the given bulk endpoint.
+ * On a queueing failure the buffer is returned to the EMPTY state. */
+static void start_transfer(struct fsg_dev *fsg, struct usb_endpoint *ep,
+		struct usb_request *req, int *pbusy,
+		enum fsg_buffer_state *state)
+{
+	int	rc;
+	unsigned long		flags;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+
+	spin_lock_irqsave(&fsg->lock, flags);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	spin_unlock_irqrestore(&fsg->lock, flags);
+	rc = usb_ept_queue_xfer(ep, req);
+	if (rc != 0) {
+		/* NOTE(review): these are cleared without fsg->lock, unlike
+		 * the set above — the request never reached the controller
+		 * so no completion can race, but confirm */
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/* Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled. */
+		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
+						req->length == 0))
+			MS_WARN(fsg, "error in submission: %s --> %d\n",
+				(ep == fsg->bulk_in ? "bulk-in" : "bulk-out"),
+				rc);
+	}
+}
+
+
+/* Block the worker thread until wakeup_thread() runs or a signal arrives.
+ * Returns 0 when woken normally, -EINTR on a signal (used by
+ * raise_exception() to interrupt the thread). */
+static int sleep_thread(struct fsg_dev *fsg)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		/* Set state before testing the flag to avoid a lost wakeup */
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (fsg->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	fsg->thread_wakeup_needed = 0;
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* READ(6)/READ(10): stream data from the backing file to the host one
+ * buffer at a time, in 512-byte logical blocks.  Errors are reported via
+ * the LUN's sense data; the return of -EIO suppresses the default reply. */
+static int do_read(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == SC_READ_6)
+		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+	else {
+		lba = get_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+	/* 512-byte sectors: byte offset = LBA << 9 */
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Carry out the file reads */
+	amount_left = fsg->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		/* No default reply */
+
+	for (;;) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 * Finally, if we're not at a page boundary, don't read past
+		 *	the next page.
+		 * If this means reading 0 then we were asked to read past
+		 *	the end of file. */
+		amount = min((unsigned int) amount_left,
+				(unsigned int)fsg->buf_size);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
+		if (partial_page > 0)
+			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
+					partial_page);
+
+		/* Wait for the next buffer to become available */
+		bh = fsg->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(fsg);
+			if (rc)
+				return rc;
+		}
+
+		/* If we were asked to read past the end of file,
+		 * end with an empty buffer. */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	/* Round down to a block */
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		fsg->residue -= nread;
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		/* No more left to read */
+
+		/* Send this buffer and go read some more */
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+		fsg->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* WRITE(6)/WRITE(10): receive data from the host and write it to the
+ * backing file, pipelining USB reception with file writes.  FUA is
+ * honored via O_SYNC.  With CONFIG_USB_CSW_HACK the CSW may be sent
+ * before the final file write completes (see comments below).
+ * Errors are reported via the LUN's sense data. */
+static int do_write(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nwritten;
+	int			rc;
+
+#ifdef CONFIG_USB_CSW_HACK
+	int			csw_hack_sent = 0;
+	int			i;
+#endif
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == SC_WRITE_6)
+		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+	else {
+		lba = get_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		if (fsg->cmnd[1] & 0x08)	/* FUA */
+			curlun->filp->f_flags |= O_SYNC;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << 9;
+	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
+
+	/* Two cursors run in parallel: usb_offset tracks data requested
+	 * from the host, file_offset tracks data written to the file */
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/* Figure out how much we want to get:
+			 * Try to get the remaining amount.
+			 * But don't get more than the buffer size.
+			 * And don't try to go past the end of the file.
+			 * If we're not at a page boundary,
+			 *	don't go past the next page.
+			 * If this means getting 0, then we were asked
+			 *	to write past the end of file.
+			 * Finally, round down to a block boundary. */
+			amount = min(amount_left_to_req, (u32)fsg->buf_size);
+			amount = min((loff_t) amount, curlun->file_length -
+					usb_offset);
+			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
+			if (partial_page > 0)
+				amount = min(amount,
+	(unsigned int) PAGE_CACHE_SIZE - partial_page);
+
+			if (amount == 0) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info = usb_offset >> 9;
+				curlun->info_valid = 1;
+				continue;
+			}
+			amount -= (amount & 511);
+			if (amount == 0) {
+
+				/* Why were we were asked to transfer a
+				 * partial block? */
+				get_some_more = 0;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			fsg->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = bh->bulk_out_intended_length =
+					amount;
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = fsg->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			/* We stopped early */
+#ifdef CONFIG_USB_CSW_HACK
+		/*
+		 * If the csw packet is already submmitted to the DCD,
+		 * by marking the state of buffer as full, then by checking
+		 * the residue, we make sure that this csw packet is not
+		 * written on to the storage media.
+		 */
+		if (bh->state == BUF_STATE_FULL && fsg->residue) {
+#else
+		if (bh->state == BUF_STATE_FULL) {
+#endif
+			smp_rmb();
+			fsg->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info = file_offset >> 9;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+	"write %u @ %llu beyond end %llu\n",
+	amount, (unsigned long long) file_offset,
+	(unsigned long long) curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					(char __user *) bh->buf,
+					amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+					(unsigned long long) file_offset,
+					(int) nwritten);
+			if (signal_pending(current))
+				return -EINTR;		/* Interrupted! */
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+						(int) nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+						(int) nwritten, amount);
+				nwritten -= (nwritten & 511);
+						/* Round down to a block */
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			fsg->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+#ifdef CONFIG_USB_CSW_HACK
+				/*
+				 * If csw is already sent & write failure
+				 * occured, then detach the storage media
+				 * from the corresponding lun, and cable must
+				 * be disconnected to recover fom this error.
+				 */
+				if (csw_hack_sent) {
+					if (backing_file_is_open(curlun)) {
+						close_backing_file(fsg, curlun);
+						curlun->unit_attention_data =
+							SS_MEDIUM_NOT_PRESENT;
+					}
+					break;
+				}
+#endif
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info = file_offset >> 9;
+				curlun->info_valid = 1;
+				break;
+			}
+
+#ifdef CONFIG_USB_CSW_HACK
+			if ((nwritten == amount) && !csw_hack_sent) {
+				/*
+				 * Check if any of the buffer is in the
+				 * busy state, if any buffer is in busy state,
+				 * means the complete data is not received
+				 * yet from the host. So there is no point in
+				 * csw right away without the complete data.
+				 */
+				for (i = 0; i < NUM_BUFFERS; i++) {
+					if (fsg->buffhds[i].state ==
+							BUF_STATE_BUSY)
+						break;
+				}
+				/* Check whether we received the complete
+				 * data from the host, before sending csw */
+				if (!amount_left_to_req && i == NUM_BUFFERS) {
+					csw_hack_sent = 1;
+					send_status(fsg);
+				}
+			}
+#endif
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual != bh->outreq->length) {
+				fsg->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Sync the file data, don't bother with the metadata.
+ * The caller must own fsg->filesem.
+ * This code was copied from fs/buffer.c:sys_fdatasync(). */
+/* Sync the file data, don't bother with the metadata.
+ * The caller must own fsg->filesem.
+ * This code was copied from fs/buffer.c:sys_fdatasync(). */
+static int fsync_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode;
+	int		rc, err;
+
+	/* Nothing to flush for read-only or detached LUNs */
+	if (curlun->ro || !filp)
+		return 0;
+	if (!filp->f_op->fsync)
+		return -EINVAL;
+
+	inode = filp->f_path.dentry->d_inode;
+	mutex_lock(&inode->i_mutex);
+	rc = filemap_fdatawrite(inode->i_mapping);
+	/* Old ->fsync prototype: (filp, dentry, datasync); 1 = datasync */
+	err = filp->f_op->fsync(filp, filp->f_path.dentry, 1);
+	if (!rc)
+		rc = err;
+	err = filemap_fdatawait(inode->i_mapping);
+	if (!rc)
+		rc = err;
+	mutex_unlock(&inode->i_mutex);
+	VLDBG(curlun, "fdatasync -> %d\n", rc);
+	return rc;
+}
+
+/* Flush the dirty file data of every LUN */
+static void fsync_all(struct fsg_dev *fsg)
+{
+	unsigned int	lun_idx;
+
+	for (lun_idx = 0; lun_idx < fsg->nluns; ++lun_idx)
+		fsync_sub(&fsg->luns[lun_idx]);
+}
+
+/* SYNCHRONIZE CACHE: flush everything for the current LUN.  The LBA
+ * range in the CDB is ignored; all dirty data buffers are written out.
+ * Always reports success; a flush failure is recorded as sense data. */
+static int do_synchronize_cache(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+
+	if (fsync_sub(curlun) != 0)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Drop the page cache for a LUN's backing file so subsequent reads hit
+ * the medium (used by VERIFY) */
+static void invalidate_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+}
+
+/* VERIFY(10): check that the requested blocks are readable by flushing
+ * and invalidating the cache, then reading them back from the medium.
+ * Read errors are reported via the LUN's sense data. */
+static int do_verify(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	lba = get_be32(&fsg->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it. */
+	if ((fsg->cmnd[1] & ~0x10) != 0) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_be16(&fsg->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		/* No default reply */
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << 9;
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 * If this means reading 0 then we were asked to read
+		 * past the end of file. */
+		amount = min((unsigned int) amount_left,
+				(unsigned int)fsg->buf_size);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	/* Round down to a sector */
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* INQUIRY: build the standard 36-byte inquiry response.  Unsupported
+ * LUNs get a "no device attached" reply instead of an error.
+ * Fix: the vendor/product format used "%-8s%-16s", which only sets
+ * MINIMUM field widths — an overlong string would shift the revision
+ * digits and malform the fixed-layout response.  Precision ("%-8.8s")
+ * now clamps each field to its exact size.  Also corrected the stale
+ * "Non-removable" comment: the removable-medium bit IS set below. */
+static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	u8	*buf = (u8 *) bh->buf;
+
+	if (!fsg->curlun) {		/* Unsupported LUNs are okay */
+		fsg->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		/* Unsupported, no device-type */
+		return 36;
+	}
+
+	memset(buf, 0, 8);	/* Direct-access device */
+
+	buf[1] = 0x80;	/* set removable bit */
+	buf[2] = 2;		/* ANSI SCSI level 2 */
+	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
+	buf[4] = 31;		/* Additional length */
+				/* No special options */
+	/* Vendor (8), product (16) and revision (4) occupy fixed fields;
+	 * the precision clamps overlong strings to their field size */
+	sprintf(buf + 8, "%-8.8s%-16.16s%04x", fsg->vendor,
+			fsg->product, fsg->release);
+	return 36;
+}
+
+
+/* REQUEST SENSE: report the current sense data in the 18-byte fixed
+ * format, then clear it (option a) below: unit attention is preserved). */
+static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+	int		valid;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		fsg->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+		valid = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		valid = curlun->info_valid << 7;
+		/* Reading the sense data clears it */
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+		curlun->info_valid = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = valid | 0x70;			/* Valid, current error */
+	buf[2] = SK(sd);
+	put_be32(&buf[3], sdinfo);		/* Sense information */
+	buf[7] = 18 - 8;			/* Additional sense length */
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
+
+/* READ CAPACITY(10): report the last valid LBA and the 512-byte block
+ * length.  NOTE(review): curlun is dereferenced without a NULL check —
+ * presumably the command dispatcher guarantees a valid LUN here; confirm. */
+static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u32		lba = get_be32(&fsg->cmnd[2]);
+	int		pmi = fsg->cmnd[8];
+	u8		*buf = (u8 *) bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	put_be32(&buf[0], curlun->num_sectors - 1);	/* Max logical block */
+	put_be32(&buf[4], 512);				/* Block length */
+	return 8;
+}
+
+
+/* MODE SENSE(6)/(10): return a mode parameter header with the
+ * write-protect bit; the Caching page is compiled out (see #if 0) to
+ * work around a Vista host issue, so no mode pages are reported. */
+static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		mscmnd = fsg->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((fsg->cmnd[1] & ~0x08) != 0) {		/* Mask away DBD */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = fsg->cmnd[2] >> 6;
+	page_code = fsg->cmnd[2] & 0x3f;
+	/* PC 3 = saved values; we have no non-volatile parameter storage */
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
+
+	/* Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later. */
+	memset(buf, 0, 8);
+	if (mscmnd == SC_MODE_SENSE_6) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 4;
+		limit = 255;
+	} else {			/* SC_MODE_SENSE_10 */
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 8;
+		limit = 65535;
+	}
+
+	/* No block descriptors */
+
+	/* Disabled to workaround USB reset problems with a Vista host.
+	 */
+#if 0
+	/* The mode pages, in numerical order.  The only page we support
+	 * is the Caching page. */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		/* Page code */
+		buf[1] = 10;		/* Page length */
+		memset(buf+2, 0, 10);	/* None of the fields are changeable */
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	/* Write cache enable, */
+					/* Read cache not disabled */
+					/* No cache retention priorities */
+			put_be16(&buf[4], 0xffff);  /* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_be16(&buf[8], 0xffff);  /* Maximum prefetch */
+			/* Maximum prefetch ceiling */
+			put_be16(&buf[10], 0xffff);
+		}
+		buf += 12;
+	}
+#else
+	valid_page = 1;
+#endif
+
+	/* Check that a valid page was requested and the mode data length
+	 * isn't too long. */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/*  Store the mode data length */
+	if (mscmnd == SC_MODE_SENSE_6)
+		buf0[0] = len - 1;
+	else
+		put_be16(buf0, len - 2);
+	return len;
+}
+
+/* START STOP UNIT: only the LoEj (load/eject) bit is acted upon — an
+ * eject detaches the backing file.  The Start and Immed bits are parsed
+ * but otherwise ignored. */
+static int do_start_stop(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		loej, start;
+
+	/* int immed = fsg->cmnd[1] & 0x01; */
+	loej = fsg->cmnd[4] & 0x02;
+	start = fsg->cmnd[4] & 0x01;
+
+	if (loej) {
+		/* eject request from the host */
+		if (backing_file_is_open(curlun)) {
+			close_backing_file(fsg, curlun);
+			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+		}
+	}
+
+	return 0;
+}
+
+/* Handle SC_PREVENT_ALLOW_MEDIUM_REMOVAL.  Bit 0 of CDB byte 4 is the
+ * Prevent flag; any other bit set there is an error.  When the host
+ * releases a previously-held lock we flush the backing file first. */
+static int do_prevent_allow(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		prevent = fsg->cmnd[4] & 0x01;
+
+	/* Everything other than the Prevent bit must be zero */
+	if (fsg->cmnd[4] & ~0x01) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/* Transition locked -> unlocked: sync cached data to the medium */
+	if (!prevent && curlun->prevent_medium_removal)
+		fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+
+/* Handle SC_READ_FORMAT_CAPACITIES: return a Capacity List containing a
+ * single Current/Maximum Capacity descriptor (12 bytes total, including
+ * the 4-byte header). */
+static int do_read_format_capacities(struct fsg_dev *fsg,
+			struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	/* Capacity List Header: three reserved bytes + list length */
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
+	buf += 4;
+
+	put_be32(&buf[0], curlun->num_sectors);	/* Number of blocks */
+	put_be32(&buf[4], 512);				/* Block length */
+	/* Deliberately overwrites the byte just written: the descriptor
+	 * code (0x02 = formatted media) shares the byte above the 24-bit
+	 * block length, so the order of these two stores matters. */
+	buf[4] = 0x02;					/* Current capacity */
+	return 12;
+}
+
+
+/* MODE SELECT is not implemented; always fail with INVALID COMMAND
+ * sense so the host knows the parameters were not accepted. */
+static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	fsg->curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+/* Halt (STALL) the bulk-in endpoint.  A controller may transiently
+ * refuse with -EAGAIN while a transfer is still in flight; in that case
+ * we retry every 100 ms.  Returns 0 on success or after a non-EAGAIN
+ * error (which is only logged), or -EINTR if the sleep was interrupted
+ * by a signal. */
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int     rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		DBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			/* Hard failure: report it but don't propagate */
+			DBG(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ept_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+/*-------------------------------------------------------------------------*/
+#if 0
+/* Compiled out: queue a zero-length packet on bulk-in to terminate a
+ * transfer.  Kept for reference only -- see the note in finish_reply()
+ * about the MacOS problems this caused. */
+static int write_zero(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+
+	DBG(fsg, "write_zero\n");
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	bh->inreq->length = 0;
+	start_transfer(fsg, fsg->bulk_in, bh->inreq,
+			&bh->inreq_busy, &bh->state);
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+#endif
+
+/* Read and discard the remaining bulk-out data for the current command
+ * (used when we cannot stall).  Keeps the OUT pipeline busy: drains
+ * filled buffers, queues new requests while usb_amount_left > 0, and
+ * sleeps when neither is possible.  Returns 0 when everything has been
+ * consumed, -EINTR on a short packet / transfer error (after raising
+ * FSG_STATE_ABORT_BULK_OUT) or if sleeping is interrupted. */
+static int throw_away_data(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	DBG(fsg, "throw_away_data\n");
+	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
+			fsg->usb_amount_left > 0) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			/* Pair with the completion's state write */
+			smp_rmb();
+			bh->state = BUF_STATE_EMPTY;
+			fsg->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual != bh->outreq->length ||
+					bh->outreq->status != 0) {
+				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
+			amount = min(fsg->usb_amount_left, (u32) fsg->buf_size);
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = bh->bulk_out_intended_length =
+					amount;
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			fsg->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+
+/* Complete the data phase of the current command.  For TO_HOST, send
+ * the final buffer, or (with residue and can_stall) drain all buffers
+ * and halt bulk-in.  For FROM_HOST, discard any excess host data.
+ * Returns 0, -EINVAL for an unknown direction, or -EINTR if an
+ * exception was raised or the thread was interrupted. */
+static int finish_reply(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	int			rc = 0;
+	int			i;
+
+	switch (fsg->data_dir) {
+	case DATA_DIR_NONE:
+		break;			/* Nothing to send */
+
+	case DATA_DIR_UNKNOWN:
+		rc = -EINVAL;
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (fsg->data_size == 0)
+			;		/* Nothing to send */
+
+		/* If there's no residue, simply send the last buffer */
+		else if (fsg->residue == 0) {
+			start_transfer(fsg, fsg->bulk_in, bh->inreq,
+					&bh->inreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+		} else {
+			if (can_stall) {
+				bh->state = BUF_STATE_EMPTY;
+				/* NOTE: the inner bh deliberately shadows
+				 * the outer one -- wait for ALL buffers to
+				 * empty before halting the endpoint. */
+				for (i = 0; i < NUM_BUFFERS; ++i) {
+					struct fsg_buffhd
+							*bh = &fsg->buffhds[i];
+					while (bh->state != BUF_STATE_EMPTY) {
+						rc = sleep_thread(fsg);
+						if (rc)
+							return rc;
+					}
+				}
+				rc = halt_bulk_in_endpoint(fsg);
+			} else {
+			start_transfer(fsg, fsg->bulk_in, bh->inreq,
+					&bh->inreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			}
+#if 0
+	/* this is unnecessary, and was causing problems with MacOS */
+			if (length > 0)
+				write_zero(fsg);
+#endif
+		}
+		break;
+
+	/* We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests. */
+	case DATA_DIR_FROM_HOST:
+		if (fsg->residue == 0)
+			;		/* Nothing to receive */
+
+		/* Did the host stop sending unexpectedly early? */
+		else if (fsg->short_packet_received) {
+			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+		}
+
+		/* We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on. */
+#if 0
+		fsg_set_halt(fsg, fsg->bulk_out);
+		raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+		rc = -EINTR;
+#endif
+
+		/* We can't stall.  Read in the excess data and throw it
+		 * all away. */
+		else
+			rc = throw_away_data(fsg);
+		break;
+	}
+	return rc;
+}
+
+
+/* Build and queue the Bulk-only Command Status Wrapper for the command
+ * that just finished.  Status and sense come from curlun (or from
+ * bad_lun_okay when no LUN was selected); a phase error overrides both.
+ * Returns 0 once the CSW transfer is queued, or an error from
+ * sleep_thread() while waiting for a free buffer. */
+static int send_status(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u8			status = USB_STATUS_PASS;
+	u32			sd, sdinfo = 0;
+	struct bulk_cs_wrap	*csw;
+
+	DBG(fsg, "send_status\n");
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (fsg->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (fsg->phase_error) {
+		DBG(fsg, "sending phase-error status\n");
+		status = USB_STATUS_PHASE_ERROR;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(fsg, "sending command-failure status\n");
+		status = USB_STATUS_FAIL;
+		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	csw = bh->buf;
+
+	/* Store and send the Bulk-only CSW */
+	csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
+	csw->Tag = fsg->tag;
+#ifdef CONFIG_USB_CSW_HACK
+	/* Since csw is being sent early, before
+	 * writing on to storage media, need to set
+	 * residue to zero,assuming that write will succeed.
+	 */
+	csw->Residue = 0;
+#else
+	csw->Residue = cpu_to_le32(fsg->residue);
+#endif
+	csw->Status = status;
+
+	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
+	start_transfer(fsg, fsg->bulk_in, bh->inreq,
+			&bh->inreq_busy, &bh->state);
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have. */
+/* Returns 0 if the command may be executed, -EINVAL otherwise (with
+ * sense data or fsg->phase_error set as appropriate).  @cmnd_size is
+ * the expected CDB length; @mask marks which CDB bytes (other than
+ * byte 0) may be non-zero; @needs_medium demands an open backing file. */
+static int check_command(struct fsg_dev *fsg, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = fsg->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
+	char			hdlen[20];
+	struct lun		*curlun;
+
+	hdlen[0] = 0;
+	if (fsg->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
+				fsg->data_size);
+	VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+			name, cmnd_size, dirletter[(int) data_dir],
+			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
+
+	/* We can't reply at all until we know the correct data direction
+	 * and size. */
+	if (fsg->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	/* CB or CBI */
+		fsg->data_dir = data_dir;
+		fsg->data_size = fsg->data_size_from_cmnd;
+
+	} else {					/* Bulk-only */
+		if (fsg->data_size < fsg->data_size_from_cmnd) {
+
+			/* Host data size < Device data size is a phase error.
+			 * Carry out the command, but only transfer as much
+			 * as we are allowed. */
+			DBG(fsg, "phase error 1\n");
+			fsg->data_size_from_cmnd = fsg->data_size;
+			fsg->phase_error = 1;
+		}
+	}
+	fsg->residue = fsg->usb_amount_left = fsg->data_size;
+
+	/* Conflicting data directions is a phase error */
+	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
+		fsg->phase_error = 1;
+		DBG(fsg, "phase error 2\n");
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != fsg->cmnd_size) {
+
+		/* Special case workaround: MS-Windows issues REQUEST SENSE/
+		 * INQUIRY with cbw->Length == 12 (it should be 6). */
+		if ((fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
+		 || (fsg->cmnd[0] == SC_INQUIRY && fsg->cmnd_size == 12))
+			cmnd_size = fsg->cmnd_size;
+		else {
+			fsg->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (fsg->lun != lun)
+		DBG(fsg, "using LUN %d from CBW, "
+				"not LUN %d from CDB\n",
+				fsg->lun, lun);
+
+	/* Check the LUN */
+	if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
+		fsg->curlun = curlun = &fsg->luns[fsg->lun];
+		if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			/* Any command but REQUEST SENSE clears old sense */
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		fsg->curlun = curlun = NULL;
+		fsg->bad_lun_okay = 0;
+
+		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not. */
+		if (fsg->cmnd[0] != SC_INQUIRY &&
+				fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
+			return -EINVAL;
+		}
+	}
+
+	/* If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+			fsg->cmnd[0] != SC_INQUIRY &&
+			fsg->cmnd[0] != SC_REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	fsg->cmnd[1] &= 0x1f;			/* Mask away the LUN */
+	for (i = 1; i < cmnd_size; ++i) {
+		if (fsg->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			DBG(fsg, "SS_INVALID_FIELD_IN_CDB\n");
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !backing_file_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		DBG(fsg, "SS_MEDIUM_NOT_PRESENT\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/* Central SCSI dispatcher.  Computes the expected data length from the
+ * saved CDB, validates it with check_command(), and invokes the handler
+ * for the opcode.  On return the reply buffer and residue are set up
+ * for finish_reply().  Returns 0, or -EINTR if the command handler was
+ * interrupted or a signal is pending. */
+static int do_scsi_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(fsg);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	fsg->phase_error = 0;
+	fsg->short_packet_received = 0;
+
+	down_read(&fsg->filesem);	/* We're using the backing file */
+	switch (fsg->cmnd[0]) {
+
+	case SC_INQUIRY:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"INQUIRY")) == 0)
+			reply = do_inquiry(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(1<<1) | (1<<4), 0,
+				"MODE SELECT(6)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (3<<7), 0,
+				"MODE SELECT(10)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (1<<4), 0,
+				"MODE SENSE(6)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (3<<7), 0,
+				"MODE SENSE(10)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<4), 0,
+				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
+			reply = do_prevent_allow(fsg);
+		break;
+
+	case SC_READ_6:
+		/* Transfer length 0 means 256 blocks; << 9 = * 512 bytes */
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(7<<1) | (1<<4), 1,
+				"READ(6)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"READ(10)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"READ(12)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_CAPACITY:
+		fsg->data_size_from_cmnd = 8;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(0xf<<2) | (1<<8), 1,
+				"READ CAPACITY")) == 0)
+			reply = do_read_capacity(fsg, bh);
+		break;
+
+	case SC_READ_FORMAT_CAPACITIES:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7), 1,
+				"READ FORMAT CAPACITIES")) == 0)
+			reply = do_read_format_capacities(fsg, bh);
+		break;
+
+	case SC_REQUEST_SENSE:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"REQUEST SENSE")) == 0)
+			reply = do_request_sense(fsg, bh);
+		break;
+
+	case SC_START_STOP_UNIT:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<1) | (1<<4), 0,
+				"START-STOP UNIT")) == 0)
+			reply = do_start_stop(fsg);
+		break;
+
+	case SC_SYNCHRONIZE_CACHE:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(0xf<<2) | (3<<7), 1,
+				"SYNCHRONIZE CACHE")) == 0)
+			reply = do_synchronize_cache(fsg);
+		break;
+
+	case SC_TEST_UNIT_READY:
+		fsg->data_size_from_cmnd = 0;
+		reply = check_command(fsg, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/* Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0. */
+	case SC_VERIFY:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"VERIFY")) == 0)
+			reply = do_verify(fsg);
+		break;
+
+	case SC_WRITE_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(7<<1) | (1<<4), 1,
+				"WRITE(6)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"WRITE(10)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"WRITE(12)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	/* Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of Posix locks. */
+	case SC_FORMAT_UNIT:
+	case SC_RELEASE:
+	case SC_RESERVE:
+	case SC_SEND_DIAGNOSTIC:
+		/* Fall through */
+
+	default:
+		fsg->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
+		/* curlun is non-NULL here: check_command() only returns 0
+		 * for a bad LUN with INQUIRY/REQUEST SENSE opcodes */
+		if ((reply = check_command(fsg, fsg->cmnd_size,
+				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+			fsg->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&fsg->filesem);
+
+	VDBG(fsg, "reply: %d, fsg->data_size_from_cmnd: %d\n",
+			reply, fsg->data_size_from_cmnd);
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		/* Error reply length */
+	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32) reply, fsg->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		fsg->residue -= reply;
+	}				/* Otherwise it's already set */
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Parse and validate a Bulk-only Command Block Wrapper that has just
+ * arrived in @bh.  On success the command bytes, data direction/size,
+ * LUN and tag are saved in @fsg and 0 is returned; a malformed or
+ * non-meaningful CBW yields -EINVAL (the caller decides how to recover).
+ */
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = req->buf;
+
+	/* Was this a real packet? */
+	if (req->status)
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != USB_BULK_CB_WRAP_LEN ||
+			cbw->Signature != __constant_cpu_to_le32(
+				USB_BULK_CB_SIG)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful?  cbw->Length is unsigned, so test for
+	 * zero explicitly rather than the misleading "<= 0". */
+	if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+			cbw->Length == 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	fsg->cmnd_size = cbw->Length;
+	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
+	if (cbw->Flags & USB_BULK_IN_FLAG)
+		fsg->data_dir = DATA_DIR_TO_HOST;
+	else
+		fsg->data_dir = DATA_DIR_FROM_HOST;
+	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (fsg->data_size == 0)
+		fsg->data_dir = DATA_DIR_NONE;
+	fsg->lun = cbw->Lun;
+	fsg->tag = cbw->Tag;
+	return 0;
+}
+
+
+/* Queue a bulk-out read for the next CBW, wait for it to complete,
+ * and hand it to received_cbw().  The buffer is drained in software so
+ * next_buffhd_to_fill is intentionally not advanced.  Returns 0 on a
+ * good CBW, or an error from sleep_thread()/received_cbw(). */
+static int get_next_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	/* Queue a request to read a Bulk-only CBW */
+	set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
+	start_transfer(fsg, fsg->bulk_out, bh->outreq,
+			&bh->outreq_busy, &bh->state);
+
+	/* We will drain the buffer in software, which means we
+	 * can reuse it for the next filling.  No need to advance
+	 * next_buffhd_to_fill. */
+
+	/* Wait for the CBW to arrive */
+	while (bh->state != BUF_STATE_FULL) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	/* Pair with the completion handler's state write */
+	smp_rmb();
+	rc = received_cbw(fsg, bh);
+	bh->state = BUF_STATE_EMPTY;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate one usb_request on @ep and store the result (possibly NULL)
+ * in *@preq.  Returns 0 on success, or -ENOMEM after logging which
+ * direction's allocation failed. */
+static int alloc_request(struct fsg_dev *fsg, struct usb_endpoint *ep,
+		struct usb_request **preq)
+{
+	struct usb_request *req = usb_ept_alloc_req(ep, 0);
+
+	*preq = req;
+	if (req)
+		return 0;
+	ERROR(fsg, "can't allocate request for bulk %s\n",
+			(ep == fsg->bulk_in ? "IN" : "OUT"));
+	return -ENOMEM;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface.  The only other
+ * available altsetting is 0, which enables the interface.
+ */
+static int do_set_interface(struct fsg_dev *fsg, int altsetting)
+{
+	int	rc = 0;
+	int	i;
+
+	if (fsg->running)
+		DBG(fsg, "reset interface\n");
+
+reset:
+	/* Deallocate the requests (also re-entered from below if an
+	 * allocation fails, with rc != 0) */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd *bh = &fsg->buffhds[i];
+
+		if (bh->inreq) {
+			usb_ept_cancel_xfer(fsg->bulk_in, bh->inreq);
+			usb_ept_free_req(fsg->bulk_in, bh->inreq);
+			bh->inreq = NULL;
+		}
+		if (bh->outreq) {
+			usb_ept_cancel_xfer(fsg->bulk_out, bh->outreq);
+			usb_ept_free_req(fsg->bulk_out, bh->outreq);
+			bh->outreq = NULL;
+		}
+	}
+
+	fsg->running = 0;
+	/* rc != 0 here means we arrived via the goto after a failure */
+	if (altsetting < 0 || rc != 0)
+		return rc;
+
+	DBG(fsg, "set interface %d\n", altsetting);
+
+	fsg->bulk_out_maxpacket = usb_ept_get_max_packet(fsg->bulk_out);
+
+	/* Allocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq);
+		if (rc != 0)
+			goto reset;
+		rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq);
+		if (rc != 0)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+
+	fsg->running = 1;
+	/* Tell the host (via sense data) that a reset occurred */
+	for (i = 0; i < fsg->nluns; ++i)
+		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+
+	return rc;
+}
+
+/* Hold the wake lock while we are configured and at least one LUN has
+ * an open backing file; release it otherwise.  Serialized against
+ * config/backing-file changes via fsg->lock. */
+static void adjust_wake_lock(struct fsg_dev *fsg)
+{
+	int ums_active = 0;
+	int i;
+	unsigned long		flags;
+	
+	spin_lock_irqsave(&fsg->lock, flags);
+
+	if (fsg->config) {
+		for (i = 0; i < fsg->nluns; ++i) {
+			if (backing_file_is_open(&fsg->luns[i]))
+				ums_active = 1;
+		}
+	}
+
+	if (ums_active)
+		wake_lock(&fsg->wake_lock);
+	else
+		wake_unlock(&fsg->wake_lock);
+
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+/*
+ * Change our operational configuration.  This code must agree with the code
+ * that returns config descriptors, and with interface altsetting code.
+ *
+ * It's also responsible for power management interactions.  Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_dev *fsg, u8 new_config)
+{
+	int	rc = 0;
+
+	/* No-op if we're already in the requested configuration */
+	if (new_config == fsg->config)
+		return rc;
+
+	/* Disable the single interface */
+	if (fsg->config != 0) {
+		DBG(fsg, "reset config\n");
+		fsg->config = 0;
+		rc = do_set_interface(fsg, -1);
+	}
+
+	/* Enable the interface */
+	if (new_config != 0) {
+		fsg->config = new_config;
+		rc = do_set_interface(fsg, 0);
+		if (rc != 0)
+			fsg->config = 0;	/* Reset on errors */
+		else
+			INFO(fsg, "config #%d\n", fsg->config);
+	}
+
+	/* Notify userspace (switch) and update the wake lock state */
+	switch_set_state(&fsg->sdev, new_config);
+	adjust_wake_lock(fsg);
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Run by the main thread whenever an exception (reset, config change,
+ * abort, signal, exit) is raised: drain pending signals, reset all
+ * buffer/SCSI state under the lock, then perform the state-specific
+ * action recorded in fsg->state before the reset. */
+static void handle_exception(struct fsg_dev *fsg)
+{
+	siginfo_t		info;
+	int			sig;
+	int			i;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	u8			new_config;
+	struct lun		*curlun;
+	int			rc;
+	unsigned long		flags;
+
+	DBG(fsg, "handle_exception state: %d\n", (int)fsg->state);
+	/* Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception. */
+	for (;;) {
+		sig = dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (fsg->state < FSG_STATE_EXIT)
+				DBG(fsg, "Main thread exiting on signal\n");
+			raise_exception(fsg, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler. */
+	spin_lock_irqsave(&fsg->lock, flags);
+
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		bh = &fsg->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
+			&fsg->buffhds[0];
+
+	new_config = fsg->new_config;
+	old_state = fsg->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		fsg->state = FSG_STATE_STATUS_PHASE;
+	else {
+		/* Full reset: clear per-LUN sense and removal-lock state */
+		for (i = 0; i < fsg->nluns; ++i) {
+			curlun = &fsg->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = curlun->unit_attention_data =
+					SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		fsg->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	default:
+		break;
+
+	case FSG_STATE_ABORT_BULK_OUT:
+		DBG(fsg, "FSG_STATE_ABORT_BULK_OUT\n");
+		/* Only advance to IDLE if no new exception arrived */
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (fsg->state == FSG_STATE_STATUS_PHASE)
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		break;
+
+	case FSG_STATE_RESET:
+		/* really not much to do here */
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		rc = do_set_config(fsg, new_config);
+		if (new_config == 0) {
+			/* We're using the backing file */
+			down_read(&fsg->filesem);
+			fsync_all(fsg);
+			up_read(&fsg->filesem);
+		}
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_config(fsg, 0);			/* Free resources */
+		spin_lock_irqsave(&fsg->lock, flags);
+		fsg->state = FSG_STATE_TERMINATED;	/* Stop the thread */
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Kernel thread implementing the Bulk-only state machine:
+ * get CBW -> execute command -> finish data phase -> send CSW, with
+ * exception handling interleaved at each step.  Runs until
+ * FSG_STATE_TERMINATED, then closes backing files and signals
+ * thread_notifier. */
+static int fsg_main_thread(void *fsg_)
+{
+	struct fsg_dev		*fsg = fsg_;
+	unsigned long		flags;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/* Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay.
+	 * NOTE(review): set_fs()/get_ds() were removed from modern
+	 * kernels; this is fine for the kernel version this driver
+	 * targets. */
+	set_fs(get_ds());
+
+	/* The main loop */
+	while (fsg->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(fsg) || signal_pending(current)) {
+			handle_exception(fsg);
+			continue;
+		}
+
+		if (!fsg->running) {
+			sleep_thread(fsg);
+			continue;
+		}
+
+		if (get_next_command(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+
+		if (do_scsi_command(fsg) || finish_reply(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+
+#ifdef CONFIG_USB_CSW_HACK
+		/* Since status is already sent for write scsi command,
+		 * need to skip sending status once again if it is a
+		 * write scsi command.
+		 */
+		if (fsg->cmnd[0] == SC_WRITE_6  || fsg->cmnd[0] == SC_WRITE_10
+					|| fsg->cmnd[0] == SC_WRITE_12)
+			continue;
+#endif
+		if (send_status(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		}
+
+	spin_lock_irqsave(&fsg->lock, flags);
+	fsg->thread_task = NULL;
+	spin_unlock_irqrestore(&fsg->lock, flags);
+
+	/* In case we are exiting because of a signal, unregister the
+	 * gadget driver and close the backing file. */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
+		close_all_backing_files(fsg);
+
+	/* Let the unbind and cleanup routines know the thread has exited */
+	complete_and_exit(&fsg->thread_notifier, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing. */
+
+/* Open @filename as the backing store for @curlun (R/W if possible,
+ * falling back to R/O), validate that it is a readable regular file or
+ * block device with a non-zero size, and record its geometry in the
+ * LUN.  Returns 0 on success or a negative errno.  Caller must hold
+ * fsg->filesem for writing while the gadget is registered.
+ * Note: get_file() takes our long-lived reference; the filp_close()
+ * at "out:" balances the filp_open() on every path, success included. */
+static int open_backing_file(struct fsg_dev *fsg, struct lun *curlun, const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (-EROFS == PTR_ERR(filp))
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	if (filp->f_path.dentry)
+		inode = filp->f_path.dentry->d_inode;
+	if (inode && S_ISBLK(inode->i_mode)) {
+		if (bdev_read_only(inode->i_bdev))
+			ro = 1;
+	} else if (!inode || !S_ISREG(inode->i_mode)) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/* If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only. */
+	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+	num_sectors = size >> 9;	/* File size in 512-byte sectors */
+	if (num_sectors == 0) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	get_file(filp);
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	LDBG(curlun, "open backing file: %s size: %lld num_sectors: %lld\n",
+			filename, size, num_sectors);
+	rc = 0;
+	adjust_wake_lock(fsg);
+
+out:
+	filp_close(filp, current->files);
+	return rc;
+}
+
+
+/* Flush and release @curlun's backing file (no-op if none is open),
+ * then re-evaluate the wake lock.  Caller must hold fsg->filesem for
+ * writing while the gadget is registered. */
+static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun)
+{
+	if (curlun->filp) {
+		int rc;
+
+		/*
+		 * XXX: San: Ugly hack here added to ensure that
+		 * our pages get synced to disk.
+		 * Also drop caches here just to be extra-safe
+		 */
+		/* NOTE(review): 3-argument vfs_fsync() is the pre-2.6.35
+		 * kernel API -- confirm against the target tree. */
+		rc = vfs_fsync(curlun->filp, curlun->filp->f_path.dentry, 1);
+		if (rc < 0)
+			printk(KERN_ERR "ums: Error syncing data (%d)\n", rc);
+		/* drop_pagecache and drop_slab are no longer available */
+		/* drop_pagecache(); */
+		/* drop_slab(); */
+
+		LDBG(curlun, "close backing file\n");
+		fput(curlun->filp);
+		curlun->filp = NULL;
+		adjust_wake_lock(fsg);
+	}
+}
+
+/* Close the backing file of every configured LUN. */
+static void close_all_backing_files(struct fsg_dev *fsg)
+{
+	int	lun;
+
+	for (lun = 0; lun < fsg->nluns; lun++)
+		close_backing_file(fsg, &fsg->luns[lun]);
+}
+
+/* sysfs "file" show method: write the backing file's full pathname
+ * plus a trailing newline into @buf (a PAGE_SIZE buffer), or an empty
+ * string if no file is open.  Returns the byte count or a d_path()
+ * error. */
+static ssize_t show_file(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {	/* Get the complete pathname */
+		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
+			/* d_path() builds the name at the END of buf;
+			 * move it to the front and append a newline */
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		/* Add a newline */
+			buf[++rc] = 0;
+		}
+	} else {				/* No file, return 0 bytes */
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(&fsg->filesem);
+	return rc;
+}
+
+/* sysfs "file" store method: eject the current medium (closing any
+ * open backing file) and, if @buf names a file, open it as the new
+ * medium with a NOT READY TO READY unit attention.  An empty string
+ * just ejects.  Returns @count on success or a negative errno. */
+static ssize_t store_file(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	int		rc = 0;
+
+	DBG(fsg, "store_file: \"%s\"\n", buf);
+#if 0
+	/* disabled because we need to allow closing the backing file if the media was removed */
+	if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
+		LDBG(curlun, "eject attempt prevented\n");
+		return -EBUSY;				/* "Door is locked" */
+	}
+#endif
+
+	/* Remove a trailing newline.  Casting away const is safe here:
+	 * the sysfs core hands us a writable kernel buffer. */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	/* Eject current medium */
+	down_write(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {
+		close_backing_file(fsg, curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+
+	/* Load new medium */
+	if (count > 0 && buf[0]) {
+		rc = open_backing_file(fsg, curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	}
+	up_write(&fsg->filesem);
+	return (rc < 0 ? rc : count);
+}
+
+
+static DEVICE_ATTR(file, 0444, show_file, store_file);
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_release(struct kref *ref)
+{
+	struct fsg_dev	*fsg = container_of(ref, struct fsg_dev, ref);
+
+	kfree(fsg->luns);
+	kfree(fsg);
+}
+
+static void lun_release(struct device *dev)
+{
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+
+	kref_put(&fsg->ref, fsg_release);
+}
+
/* Tear down everything fsg_bind() set up: LUN sysfs devices, the worker
 * thread, the data buffers, and the bulk endpoints.  Safe on a
 * partially-bound device — fsg_bind()'s error path calls it that way.
 */
static void /* __init_or_exit */ fsg_unbind(void *_ctxt)
{
	struct fsg_dev		*fsg = _ctxt;
	int			i;
	struct lun		*curlun;

	pr_debug("%s ()\n", __func__);
	if (!fsg)
		return;
	if (!fsg->bound)
		return;

	fsg->running = 0;
	clear_bit(REGISTERED, &fsg->atomic_bitflags);

	/* Unregister the sysfs attribute files and the LUNs */
	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		if (curlun->registered) {
			device_remove_file(&curlun->dev, &dev_attr_file);
			device_unregister(&curlun->dev);
			curlun->registered = 0;
		}
	}

	/* If the thread isn't already dead, tell it to exit now */
	if (fsg->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg, FSG_STATE_EXIT);
		wait_for_completion(&fsg->thread_notifier);

	}

	/* Free the data buffers */
	for (i = 0; i < NUM_BUFFERS; ++i) {
		kfree(fsg->buffhds[i].buf);
		fsg->buffhds[i].buf = NULL;
	}

	/* Flush, disable, and release each bulk endpoint if claimed */
	if (fsg->bulk_in) {
		usb_ept_fifo_flush(fsg->bulk_in);
		usb_ept_enable(fsg->bulk_in,  0);
		usb_free_endpoint(fsg->bulk_in);
	}
	if (fsg->bulk_out) {
		usb_ept_fifo_flush(fsg->bulk_out);
		usb_ept_enable(fsg->bulk_out,  0);
		usb_free_endpoint(fsg->bulk_out);
	}
	fsg->bound = 0;
}
+
+static void fsg_bind(void *_ctxt)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	int			rc;
+	int			i;
+	unsigned int 		ret;
+	struct lun		*curlun;
+	char			*pathbuf, *p;
+	struct usb_function	*usb_func = &fsg_function;
+	struct usb_endpoint *ep;
+
+
+	dev_attr_file.attr.mode = 0644;
+	fsg->running = 0;
+
+	/* Find out how many LUNs there should be */
+	i = fsg->nluns;
+	if (i == 0)
+		i = 1;
+	if (i > MAX_LUNS) {
+		ERROR(fsg, "invalid number of LUNs: %d\n", i);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs. */
+	fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
+	if (!fsg->luns) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	fsg->nluns = i;
+
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		curlun->ro = 0;
+		curlun->dev.release = lun_release;
+		curlun->dev.parent = &fsg->pdev->dev;
+		dev_set_drvdata(&curlun->dev, fsg);
+		snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
+				"lun%d", i);
+
+		rc = device_register(&curlun->dev);
+		if (rc != 0) {
+			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
+			goto out;
+		}
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc != 0) {
+			ERROR(fsg, "device_create_file failed: %d\n", rc);
+			device_unregister(&curlun->dev);
+			goto out;
+		}
+		curlun->registered = 1;
+		kref_get(&fsg->ref);
+	}
+	ret = usb_msm_get_next_ifc_number(usb_func);
+	intf_desc.bInterfaceNumber = ret;
+	pr_debug("%s: interface number = %d\n", __func__, ret);
+
+	ep = fsg->bulk_in = usb_alloc_endpoint(USB_DIR_IN);
+	hs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	fs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: bulk in endpoint number = %d\n",
+						__func__, ep->num);
+
+	ep = fsg->bulk_out = usb_alloc_endpoint(USB_DIR_OUT);
+	hs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	fs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	pr_debug("%s: bulk out endpoint number = %d\n",
+						__func__, ep->num);
+
+	/* Allocate the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		/* Allocate for the bulk-in endpoint.  We assume that
+		 * the buffer will also work with the bulk-out (and
+		 * interrupt-in) endpoint. */
+		bh->buf = kmalloc(fsg->buf_size, GFP_KERNEL);
+		if (!bh->buf)
+			goto out;
+		bh->next = bh + 1;
+	}
+	fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+
+	fsg->state = FSG_STATE_IDLE;
+	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
+			"USB mass_storage");
+	if (IS_ERR(fsg->thread_task)) {
+		rc = PTR_ERR(fsg->thread_task);
+		ERROR(fsg, "kthread_create failed: %d\n", rc);
+		goto out;
+	}
+
+	DBG(fsg, "Number of LUNs=%d\n", fsg->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (backing_file_is_open(curlun)) {
+			p = NULL;
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = NULL;
+			}
+			LINFO(curlun, "ro=%d, file: %s\n",
+					curlun->ro, (p ? p : "(error)"));
+		}
+	}
+	kfree(pathbuf);
+
+	set_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Tell the thread to start working */
+	wake_up_process(fsg->thread_task);
+	fsg->bound = 1;
+	return;
+
+out:
+	fsg->state = FSG_STATE_TERMINATED;	/* The thread is dead */
+	fsg->bound = 1;
+	fsg_unbind(fsg);
+	close_all_backing_files(fsg);
+}
+
/* Configuration-change callback from the USB stack.  On configure,
 * program the endpoints for the negotiated speed, enable them, and
 * block idle power collapse; on deconfigure, release the idle wake
 * lock.  The worker thread is notified last via an exception.
 */
static void fsg_configure(int configured, void *_ctxt)
{
	struct fsg_dev *fsg = _ctxt;

	if (!fsg)
		return;
	if (!fsg->bound)
		return;

	/* Clear out the controller's fifos */
	if ((fsg->new_config) && (fsg->bulk_in))
		usb_ept_fifo_flush(fsg->bulk_in);
	if ((fsg->new_config) && (fsg->bulk_out))
		usb_ept_fifo_flush(fsg->bulk_out);

	if (configured) {
		/* pick the descriptor set matching the enumerated speed */
		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
			usb_configure_endpoint(fsg->bulk_in, &hs_bulk_in_desc);
			usb_configure_endpoint(fsg->bulk_out,
						&hs_bulk_out_desc);
		} else {
			usb_configure_endpoint(fsg->bulk_in, &fs_bulk_in_desc);
			usb_configure_endpoint(fsg->bulk_out,
						&fs_bulk_out_desc);
		}

		usb_ept_enable(fsg->bulk_in, 1);
		usb_ept_enable(fsg->bulk_out, 1);
		wake_lock(&fsg->wake_lock_idle);
	} else
		wake_unlock(&fsg->wake_lock_idle);

	fsg->new_config = configured;
	raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
}
+
+/*-------------------------------------------------------------------------*/
+
/* Registration record handed to the MSM USB function framework; the
 * callbacks above are invoked from the framework. */
static struct usb_function		fsg_function = {
	.bind		= fsg_bind,
	.unbind		= fsg_unbind,
	.configure  = fsg_configure,
	.setup		= fsg_setup,

	.name = "mass_storage",

};
+
+
+static int __init fsg_alloc(void)
+{
+	struct fsg_dev		*fsg;
+
+	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+	if (!fsg)
+		return -ENOMEM;
+	spin_lock_init(&fsg->lock);
+	init_rwsem(&fsg->filesem);
+	kref_init(&fsg->ref);
+	init_completion(&fsg->thread_notifier);
+
+	the_fsg = fsg;
+	return 0;
+}
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	struct fsg_dev	*fsg = container_of(sdev, struct fsg_dev, sdev);
+	return sprintf(buf, "%s\n", (fsg->config ? "online" : "offline"));
+}
/* Platform-driver remove: unregister from the USB stack and switch
 * class, close all backing files and drop the initial reference (the
 * state is freed once the last LUN device's release runs).
 * NOTE(review): only wake_lock_idle is destroyed here; the suspend
 * wake_lock initialized in fsg_probe() is not — confirm intent.
 */
static int __exit fsg_remove(struct platform_device *pdev)
{
	struct fsg_dev  *fsg = the_fsg;

	usb_function_unregister(&fsg_function);
	wake_lock_destroy(&fsg->wake_lock_idle);
	switch_dev_unregister(&fsg->sdev);
	test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags);
	close_all_backing_files(fsg);
	kref_put(&fsg->ref, fsg_release);

	return 0;
}
+
+static int __init fsg_probe(struct platform_device *pdev)
+{
+	struct usb_mass_storage_platform_data *pdata = pdev->dev.platform_data;
+	int		rc;
+
+	rc = fsg_alloc();
+	if (rc != 0)
+		return rc;
+
+	the_fsg->pdev = pdev;
+	the_fsg->sdev.name = DRIVER_NAME;
+	the_fsg->nluns = pdata->nluns;
+	the_fsg->buf_size = pdata->buf_size;
+	the_fsg->vendor = pdata->vendor;
+	the_fsg->product = pdata->product;
+	the_fsg->release = pdata->release;
+	the_fsg->sdev.print_name = print_switch_name;
+	the_fsg->sdev.print_state = print_switch_state;
+	rc = switch_dev_register(&the_fsg->sdev);
+	if (rc < 0)
+		goto err_switch_dev_register;
+
+	wake_lock_init(&the_fsg->wake_lock, WAKE_LOCK_SUSPEND,
+		       "usb_mass_storage");
+	wake_lock_init(&the_fsg->wake_lock_idle, WAKE_LOCK_IDLE,
+		       "mass_storage_hold_idle");
+
+	fsg_function.hs_descriptors = hs_function;
+	fsg_function.fs_descriptors = fs_function;
+	fsg_function.context = the_fsg;
+	rc = usb_function_register(&fsg_function);
+	if (rc != 0)
+		goto err_usb_function_register;
+
+	return 0;
+
+err_usb_function_register:
+	switch_dev_unregister(&the_fsg->sdev);
+err_switch_dev_register:
+	kref_put(&the_fsg->ref, fsg_release);
+
+	return rc;
+}
+
/* Platform-driver glue; binds against devices named DRIVER_NAME. */
static struct platform_driver fsg_driver = {
	.probe = fsg_probe,
	.remove = __exit_p(fsg_remove),
	.driver = { .name = DRIVER_NAME, },
};
+
/* Module entry point: just register the platform driver. */
static int __init fsg_init(void)
{
	return platform_driver_register(&fsg_driver);
}
module_init(fsg_init);
+
/* Module exit point: unregister the platform driver. */
static void __exit fsg_cleanup(void)
{
	platform_driver_unregister(&fsg_driver);

}
module_exit(fsg_cleanup);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/msm_hsusb.c b/drivers/usb/function/msm_hsusb.c
new file mode 100644
index 0000000..eebd9d4
--- /dev/null
+++ b/drivers/usb/function/msm_hsusb.c
@@ -0,0 +1,3948 @@
+/* drivers/usb/function/msm_hsusb.c
+ *
+ * Driver for HighSpeed USB Client Controller in MSM7K
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+#include <linux/switch.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <mach/vreg.h>
+#include <mach/board.h>
+#include <mach/msm_hsusb.h>
+#include <mach/rpc_hsusb.h>
+#include <mach/rpc_pmapp.h>
+#include <mach/gpio.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm_otg.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos_params.h>
+#include <mach/clk.h>
+
+#define MSM_USB_BASE ((unsigned) ui->addr)
+
+#include "usb_function.h"
+
+#define EPT_FLAG_IN	0x0001
+#define USB_DIR_MASK	USB_DIR_IN
+#define SETUP_BUF_SIZE	4096
+
+/* IDs for string descriptors */
+#define STRING_LANGUAGE_ID      0
+#define STRING_SERIAL           1
+#define STRING_PRODUCT          2
+#define STRING_MANUFACTURER     3
+
+#define LANGUAGE_ID             0x0409 /* en-US */
+#define SOC_ROC_2_0		0x10002 /* ROC 2.0 */
+
+#define TRUE			1
+#define FALSE			0
+#define USB_LINK_RESET_TIMEOUT	(msecs_to_jiffies(10))
+#define USB_CHG_DET_DELAY	msecs_to_jiffies(1000)
+
+#define is_phy_45nm()     (PHY_MODEL(ui->phy_info) == USB_PHY_MODEL_45NM)
+#define is_phy_external() (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL)
+
+static int pid = 0x9018;
+
/* An endpoint claimed by a function driver, paired with the descriptor
 * it was configured from. */
struct usb_fi_ept {
	struct usb_endpoint *ept;
	struct usb_endpoint_descriptor desc;
};

/* Per-function bookkeeping kept by the controller core. */
struct usb_function_info {
	struct list_head list;
	unsigned enabled;	/* presumably: part of active composition — confirm */
	struct usb_function *func;
};

/* Controller-private wrapper around a struct usb_request. */
struct msm_request {
	struct usb_request req;

	struct usb_info *ui;
	struct msm_request *next;	/* next request queued on the endpoint */

	unsigned busy:1;	/* queued, not yet completed */
	unsigned live:1;	/* transaction handed to the hardware */
	unsigned alloced:1;	/* req.buf owned by usb_ept_alloc_req() */
	unsigned dead:1;	/* free deferred until completion */

	dma_addr_t dma;		/* DMA-mapped address of req.buf */

	struct ept_queue_item *item;	/* hardware transfer descriptor */
	dma_addr_t item_dma;		/* its bus address */
};
/* String descriptor 0: the list of supported LANGIDs (just en-US),
 * stored little-endian. */
static unsigned char str_lang_desc[] = {4,
				USB_DT_STRING,
				(unsigned char)LANGUAGE_ID,
				(unsigned char)(LANGUAGE_ID >> 8)};
+
+#define to_msm_request(r) container_of(r, struct msm_request, req)
+static int usb_hw_reset(struct usb_info *ui);
+static void usb_vbus_online(struct usb_info *);
+static void usb_vbus_offline(struct usb_info *ui);
+static void usb_lpm_exit(struct usb_info *ui);
+static void usb_lpm_wakeup_phy(struct work_struct *);
+static void usb_exit(void);
+static int usb_is_online(struct usb_info *ui);
+static void usb_do_work(struct work_struct *w);
+static int usb_lpm_enter(struct usb_info *ui);
+int (*usb_lpm_config_gpio)(int);
+static void usb_enable_pullup(struct usb_info *ui);
+static void usb_disable_pullup(struct usb_info *ui);
+
+static struct workqueue_struct *usb_work;
+static void usb_chg_stop(struct work_struct *w);
+
+#define USB_STATE_IDLE    0
+#define USB_STATE_ONLINE  1
+#define USB_STATE_OFFLINE 2
+
+#define USB_FLAG_START          0x0001
+#define USB_FLAG_VBUS_ONLINE    0x0002
+#define USB_FLAG_VBUS_OFFLINE   0x0004
+#define USB_FLAG_RESET          0x0008
+#define USB_FLAG_SUSPEND	0x0010
+#define USB_FLAG_CONFIGURE	0x0020
+#define USB_FLAG_RESUME	0x0040
+#define USB_FLAG_REG_OTG 0x0080
+
+#define USB_MSC_ONLY_FUNC_MAP	0x10
+#define DRIVER_NAME		"msm_hsusb_peripheral"
+
/* Work item used to bring the PHY back out of low-power mode. */
struct lpm_info {
	struct work_struct wakeup_phy;
};

/* Result of charger detection. */
enum charger_type {
	USB_CHG_TYPE__SDP,		/* standard downstream port */
	USB_CHG_TYPE__CARKIT,
	USB_CHG_TYPE__WALLCHARGER,	/* dedicated charger */
	USB_CHG_TYPE__INVALID		/* unknown / nothing attached */
};
+
/* Per-controller driver state; a single instance (the_usb_info). */
struct usb_info {
	/* lock for register/queue/device state changes */
	spinlock_t lock;

	/* single request used for handling setup transactions */
	struct usb_request *setup_req;
	struct usb_request *ep0out_req;

	struct platform_device *pdev;
	struct msm_hsusb_platform_data *pdata;
	int irq;
	int gpio_irq[2];
	void *addr;		/* mapped controller registers (MSM_USB_BASE) */

	unsigned state;
	unsigned flags;		/* USB_FLAG_* events for the work handler */

	unsigned online;
	unsigned running;
	unsigned bound;

	struct dma_pool *pool;	/* backs ept_queue_item allocations */

	/* dma page to back the queue heads and items */
	unsigned char *buf;
	dma_addr_t dma;

	struct ept_queue_head *head;

	/* used for allocation */
	unsigned next_item;
	unsigned next_ifc_num;	/* next free bInterfaceNumber */
	unsigned stopped:1;
	unsigned remote_wakeup:1;
	unsigned configured:1;
	unsigned selfpowered:1;
	unsigned iad:1;		/* interface association descriptor in use */
	unsigned char maxpower;
	enum usb_device_speed speed;
	unsigned phy_info;

	/* endpoints are ordered based on their status bits,
	** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15
	*/
	struct usb_endpoint ept[32];

	struct delayed_work work;
	struct delayed_work chg_legacy_det;	/* legacy charger detection */
	unsigned phy_status;
	unsigned phy_fail_count;
	struct usb_composition *composition;

	struct usb_function_info **func;
	unsigned num_funcs;
	struct usb_function_map *functions_map;

#define MAX_INTERFACE_NUM	15
	struct usb_function *func2ifc_map[MAX_INTERFACE_NUM];

#define ep0out ept[0]
#define ep0in  ept[16]

	struct clk *clk;
	struct clk *pclk;
	struct clk *cclk;	/* core clock; absent on some targets */
	unsigned int clk_enabled;

	struct vreg *vreg;
	unsigned int vreg_enabled;

	unsigned in_lpm;	/* non-zero while in low-power mode */
	struct lpm_info li;

	enum charger_type chg_type;
	struct work_struct chg_stop;
#define MAX_STRDESC_NUM		100
	char **strdesc;		/* string-descriptor table */
	int strdesc_index;	/* next free slot in strdesc[] */

	u16 test_mode;
	struct wake_lock wlock;
	struct msm_otg_transceiver *xceiv;
	int active;
	enum usb_device_state usb_state;
	int vbus_sn_notif;
	struct switch_dev sdev;
};
+static struct usb_info *the_usb_info;
+
+static unsigned short usb_validate_product_id(unsigned short pid);
+static unsigned short usb_get_product_id(unsigned long enabled_functions);
+static void usb_switch_composition(unsigned short pid);
+static unsigned short usb_set_composition(unsigned short pid);
+static void usb_configure_device_descriptor(struct usb_info *ui);
+static void usb_uninit(struct usb_info *ui);
+
+static unsigned ulpi_read(struct usb_info *ui, unsigned reg);
+static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg);
+
+
+
/* Template device descriptor; ID and string-index fields are filled in
 * later (see usb_configure_device_descriptor()). */
struct usb_device_descriptor desc_device = {
	.bLength = USB_DT_DEVICE_SIZE,
	.bDescriptorType = USB_DT_DEVICE,
	.bcdUSB = 0x0200,
	.bDeviceClass = 0,
	.bDeviceSubClass = 0,
	.bDeviceProtocol = 0,
	.bMaxPacketSize0 = 64,
	/* the following fields are filled in by usb_probe */
	.idVendor = 0,
	.idProduct = 0,
	.bcdDevice = 0,
	.iManufacturer = 0,
	.iProduct = 0,
	.iSerialNumber = 0,
	.bNumConfigurations = 1,
};
+
+static void flush_endpoint(struct usb_endpoint *ept);
+static void msm_hsusb_suspend_locks_acquire(struct usb_info *, int);
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return sprintf(buf, "%s\n", (ui->online ? "online" : "offline"));
+}
+
+#define USB_WALLCHARGER_CHG_CURRENT 1800
+static int usb_get_max_power(struct usb_info *ui)
+{
+	unsigned long flags;
+	enum charger_type temp;
+	int suspended;
+	int configured;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	temp = ui->chg_type;
+	suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0;
+	configured = ui->configured;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__INVALID)
+		return -ENODEV;
+
+	if (temp == USB_CHG_TYPE__WALLCHARGER)
+		return USB_WALLCHARGER_CHG_CURRENT;
+
+	if (suspended || !configured)
+		return 0;
+
+	return ui->maxpower * 2;
+}
+
/* Delayed-work handler for legacy charger detection.  If the PORTSC
 * line-state bits are all set (presumably both data lines pulled up by
 * a dedicated charger — confirm), classify as a wall charger; else a
 * standard downstream port.  Reports the type and available current to
 * the charging driver.
 */
static void usb_chg_legacy_detect(struct work_struct *w)
{
	struct usb_info *ui = the_usb_info;
	unsigned long flags;
	enum charger_type temp = USB_CHG_TYPE__INVALID;
	int maxpower;
	int ret = 0;

	spin_lock_irqsave(&ui->lock, flags);

	/* the cable may already be gone by the time this work runs */
	if (ui->usb_state == USB_STATE_NOTATTACHED) {
		ret = -ENODEV;
		goto chg_legacy_det_out;
	}

	if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) {
		ui->chg_type = temp = USB_CHG_TYPE__WALLCHARGER;
		goto chg_legacy_det_out;
	}

	ui->chg_type = temp = USB_CHG_TYPE__SDP;
chg_legacy_det_out:
	spin_unlock_irqrestore(&ui->lock, flags);

	if (ret)
		return;

	msm_chg_usb_charger_connected(temp);
	maxpower = usb_get_max_power(ui);
	if (maxpower > 0)
		msm_chg_usb_i_is_available(maxpower);

	/* USB driver prevents idle and suspend power collapse(pc)
	 * while usb cable is connected. But when dedicated charger is
	 * connected, driver can vote for idle and suspend pc. In order
	 * to allow pc, driver has to initiate low power mode which it
	 * cannot do as phy cannot be accessed when dedicated charger
	 * is connected due to phy lockup issues. Just to allow idle &
	 * suspend pc when dedicated charger is connected, release the
	 * wakelock, set driver latency to default and act as if we are
	 * in low power mode so that, driver will re-acquire wakelocks
	 * for any sub-sequent usb interrupts.
	 */
	if (temp == USB_CHG_TYPE__WALLCHARGER) {
		pr_info("\n%s: WALL-CHARGER\n", __func__);
		spin_lock_irqsave(&ui->lock, flags);
		if (ui->usb_state == USB_STATE_NOTATTACHED) {
			spin_unlock_irqrestore(&ui->lock, flags);
			return;
		}
		ui->in_lpm = 1;
		spin_unlock_irqrestore(&ui->lock, flags);

		msm_hsusb_suspend_locks_acquire(ui, 0);
	} else
		pr_info("\n%s: Standard Downstream Port\n", __func__);
}
+
+int usb_msm_get_next_strdesc_id(char *str)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned id;
+	unsigned long flags;
+	int len;
+
+	len = strlen(str);
+	if (!len) {
+		printk(KERN_ERR "usb next_strdesc_id(); null string\n");
+		return -EPERM;
+	}
+	/* for null character */
+	len = len + 1;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	id = ui->strdesc_index;
+	if (id >= MAX_STRDESC_NUM) {
+		id = -EPERM;
+		printk(KERN_ERR "reached max strdesc number\n");
+		goto get_strd_id_exit;
+	}
+
+	ui->strdesc[id] = kmalloc(len, GFP_ATOMIC);
+	if (ui->strdesc[id]) {
+		memcpy(ui->strdesc[id], str, len);
+		ui->strdesc_index++;
+	} else {
+		id = -EPERM;
+		printk(KERN_ERR "usb next_strdesc_id(); Out of memory:(%s)\n",
+			str);
+	}
+
+get_strd_id_exit:
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return id;
+}
+EXPORT_SYMBOL(usb_msm_get_next_strdesc_id);
+
+
+inline int usb_msm_is_iad(void)
+{
+	return the_usb_info->iad;
+}
+EXPORT_SYMBOL(usb_msm_is_iad);
+
+inline void usb_msm_enable_iad(void)
+{
+	the_usb_info->iad = 1;
+}
+EXPORT_SYMBOL(usb_msm_enable_iad);
+
+int usb_msm_get_speed()
+{
+	return the_usb_info->speed;
+}
+EXPORT_SYMBOL(usb_msm_get_speed);
+
+int usb_msm_get_next_ifc_number(struct usb_function *driver)
+{
+	struct usb_info *ui = the_usb_info;
+	int ifc_num = -1;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	for (i = 0; i < ui->pdata->num_functions; i++) {
+		if (strcmp(ui->functions_map[i].name, driver->name))
+			continue;
+		if (!(ui->composition->functions & (1 << i)))
+			continue;
+		ifc_num = ui->next_ifc_num++;
+		ui->func2ifc_map[ifc_num] = driver;
+		break;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return ifc_num;
+}
+EXPORT_SYMBOL(usb_msm_get_next_ifc_number);
+
+static inline int usb_msm_get_selfpowered(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return ui->selfpowered;
+}
+static inline int usb_msm_get_remotewakeup(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return ui->remote_wakeup;
+}
+
+static void usb_clk_enable(struct usb_info *ui)
+{
+	if (!ui->clk_enabled) {
+		clk_enable(ui->pclk);
+		if (ui->cclk)
+			clk_enable(ui->cclk);
+		ui->clk_enabled = 1;
+	}
+}
+
+static void usb_clk_disable(struct usb_info *ui)
+{
+	if (ui->clk_enabled) {
+		clk_disable(ui->pclk);
+		if (ui->cclk)
+			clk_disable(ui->cclk);
+		ui->clk_enabled = 0;
+	}
+}
+
+static void usb_vreg_enable(struct usb_info *ui)
+{
+	if (ui->vreg && !IS_ERR(ui->vreg) && !ui->vreg_enabled) {
+		vreg_enable(ui->vreg);
+		ui->vreg_enabled = 1;
+	}
+}
+
+static void usb_vreg_disable(struct usb_info *ui)
+{
+	if (ui->vreg && !IS_ERR(ui->vreg) && ui->vreg_enabled) {
+		vreg_disable(ui->vreg);
+		ui->vreg_enabled = 0;
+	}
+}
+
+static unsigned ulpi_read(struct usb_info *ui, unsigned reg)
+{
+	unsigned timeout = 100000;
+
+	/* initiate read operation */
+	writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ;
+
+	if (timeout == 0) {
+		printk(KERN_ERR "ulpi_read: timeout %08x\n",
+			readl(USB_ULPI_VIEWPORT));
+		return 0xffffffff;
+	}
+	return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+}
+
+static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg)
+{
+	unsigned timeout = 10000;
+
+	/* initiate write operation */
+	writel(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ;
+
+	if (timeout == 0) {
+		printk(KERN_ERR "ulpi_write: timeout\n");
+		return -1;
+	}
+
+	return 0;
+}
+
/* acquire != 0: hold the suspend wake lock and pin CPU latency (and,
 * on targets without a core clock, the AXI bus frequency) so traffic
 * is not stalled by power collapse.  acquire == 0: drop back to the
 * defaults, keeping the wake lock briefly (HZ/2) to let work drain.
 */
static void msm_hsusb_suspend_locks_acquire(struct usb_info *ui, int acquire)
{
	if (acquire) {
		wake_lock(&ui->wlock);
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
				DRIVER_NAME, ui->pdata->swfi_latency);
		/* targets like 7x30 have introduced core clock
		 * to remove the dependency on max axi frequency
		 */
		if (!ui->cclk)
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					DRIVER_NAME, MSM_AXI_MAX_FREQ);
	} else {
		wake_lock_timeout(&ui->wlock, HZ / 2);
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
					DRIVER_NAME,
					PM_QOS_DEFAULT_VALUE);
		if (!ui->cclk)
			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
					DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
	}
}
+
/* init != 0: create the wake lock and register the PM QoS latency and
 * bus-frequency requirements at default values; init == 0: remove
 * them again.
 */
static void msm_hsusb_suspend_locks_init(struct usb_info *ui, int init)
{
	if (init) {
		wake_lock_init(&ui->wlock, WAKE_LOCK_SUSPEND,
				"usb_bus_active");
		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
				DRIVER_NAME,
				PM_QOS_DEFAULT_VALUE);
		pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ,
				DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
	} else {
		wake_lock_destroy(&ui->wlock);
		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME);
		pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME);
	}
}
+
+static void init_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++) {
+		struct usb_endpoint *ept = ui->ept + n;
+
+		ept->ui = ui;
+		ept->bit = n;
+		ept->num = n & 15;
+		ept->alloced = 0;
+
+		if (ept->bit > 15) {
+			/* IN endpoint */
+			ept->head = ui->head + (ept->num << 1) + 1;
+			ept->flags = EPT_FLAG_IN;
+		} else {
+			/* OUT endpoint */
+			ept->head = ui->head + (ept->num << 1);
+			ept->flags = 0;
+		}
+	}
+}
+
/* Program an endpoint's queue head (max packet size, zero-length
 * termination, interrupt-on-setup for ep0) from @ep_desc.  The
 * descriptor is cached in ep->ep_descriptor for usb_ept_enable();
 * passing NULL re-applies the previously cached max packet size.
 */
void usb_configure_endpoint(struct usb_endpoint *ep,
			struct usb_endpoint_descriptor *ep_desc)
{
	unsigned cfg = 0;
	unsigned long flags;
	struct usb_info *ui = ep->ui;

	if (!ui)
		return;
	spin_lock_irqsave(&ui->lock, flags);

	if (ep_desc) {
		ep->max_pkt = ep_desc->wMaxPacketSize;
		ep->ep_descriptor = ep_desc;
	}

	if (!ep->max_pkt) {
		printk(KERN_ERR "cannot configure zero length max pkt\n");
		goto cfg_ept_end;
	}

	cfg = CONFIG_MAX_PKT(ep->max_pkt) | CONFIG_ZLT;
	/* ep0 out needs interrupt-on-setup */
	if (ep->bit == 0)
		cfg |= CONFIG_IOS;
	ep->head->config = cfg;
	ep->head->next = TERMINATE;

	pr_debug("ept #%d %s max:%d head:%p bit:%d\n",
		       ep->num,
		       (ep->flags & EPT_FLAG_IN) ? "in" : "out",
		       ep->max_pkt, ep->head, ep->bit);

cfg_ept_end:
	spin_unlock_irqrestore(&ui->lock, flags);
}
EXPORT_SYMBOL(usb_configure_endpoint);
+
+#define NUM_EPTS 15	/* number of in or out non-ctrl endpoints */
+struct usb_endpoint *usb_alloc_endpoint(unsigned direction)
+{
+	struct usb_info *ui = the_usb_info;
+	struct usb_endpoint *ept = NULL;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (direction & USB_DIR_IN)
+		ept = (&ui->ep0in);
+	else
+		ept = (&ui->ep0out);
+
+	for (i = 0; i < NUM_EPTS; i++) {
+		ept++;
+		if (!ept->alloced) {
+			ept->alloced = 1;
+			ept->ui = ui;
+			spin_unlock_irqrestore(&ui->lock, flags);
+			return ept;
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return NULL;
+}
+EXPORT_SYMBOL(usb_alloc_endpoint);
+
+int usb_free_endpoint(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	if (!ept)
+		return -EINVAL;
+	spin_lock_irqsave(&ui->lock, flags);
+	ept->alloced = 0;
+	ept->ui = 0;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_free_endpoint);
+
+struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept,
+			unsigned bufsize)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req;
+
+	if (!ui)
+		return NULL;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		goto fail1;
+
+	req->item = dma_pool_alloc(ui->pool, GFP_ATOMIC, &req->item_dma);
+	if (!req->item)
+		goto fail2;
+
+	if (bufsize) {
+		req->req.buf = kmalloc(bufsize, GFP_ATOMIC);
+		if (!req->req.buf)
+			goto fail3;
+		req->alloced = 1;
+	}
+
+	return &req->req;
+
+fail3:
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+fail2:
+	kfree(req);
+fail1:
+	return NULL;
+}
+EXPORT_SYMBOL(usb_ept_alloc_req);
+
+static void do_free_req(struct usb_info *ui, struct msm_request *req)
+{
+	if (req->alloced)
+		kfree(req->req.buf);
+
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+	kfree(req);
+}
+
/* Free a request allocated by usb_ept_alloc_req().  If the request is
 * still busy on the hardware the free is deferred (req->dead) and
 * performed by the completion path; otherwise the request is unlinked
 * from the endpoint's software queue and released immediately.
 */
void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *_req)
{
	struct msm_request *req, *temp_req, *prev_req;
	struct usb_info *ui;
	unsigned long flags;
	int dead = 0;
	if (!ept || !_req)
		return;

	ui = ept->ui;
	if (!ui)
		return;

	req = to_msm_request(_req);
	spin_lock_irqsave(&ui->lock, flags);
	/* defer freeing resources if request is still busy */
	if (req->busy)
		dead = req->dead = 1;
	spin_unlock_irqrestore(&ui->lock, flags);

	/* if req->dead, then we will clean up when the request finishes */
	if (!dead) {
		/* unlink req from the singly-linked software queue */
		temp_req = ept->req;
		prev_req = temp_req;
		while (temp_req != NULL) {
			if (req == temp_req && ept->req != temp_req)
				prev_req->next = temp_req->next;

			prev_req = temp_req;
			temp_req = temp_req->next;
		}
		/* req at the head: advance the head pointer */
		if (ept->req == req)
			ept->req = req->next;
		req->req.complete = NULL;
		do_free_req(ui, req);
	} else
		pr_err("%s: req is busy, can't free req\n", __func__);
}
EXPORT_SYMBOL(usb_ept_free_req);
+
/* Enable (yes != 0) or disable (yes == 0) a non-control endpoint by
 * programming its ENDPTCTRL register, taking the transfer type from
 * the descriptor cached by usb_configure_endpoint().  Refuses to touch
 * the registers while the controller is in low-power mode.
 */
void usb_ept_enable(struct usb_endpoint *ept, int yes)
{
	struct usb_info *ui;
	int in;
	unsigned n;
	unsigned char xfer;

	if (!ept || !ept->ui)
		return;
	ui = ept->ui;
	in = ept->flags & EPT_FLAG_IN;
	if (!ept->ep_descriptor)
		return;

	if (ui->in_lpm) {
		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
		return;
	}

	xfer = ept->ep_descriptor->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	n = readl(USB_ENDPTCTRL(ept->num));

	if (in) {
		/* TX (IN) half of the control register */
		if (xfer == USB_ENDPOINT_XFER_BULK)
			n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_BULK;
		else if (xfer == USB_ENDPOINT_XFER_INT)
			n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_INT;
		if (yes)
			n |= CTRL_TXE | CTRL_TXR; /* TXR presumably resets the data toggle — confirm */
		else
			n &= (~CTRL_TXE);
	} else {
		/* RX (OUT) half of the control register */
		if (xfer == USB_ENDPOINT_XFER_BULK)
			n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_BULK;
		else if (xfer == USB_ENDPOINT_XFER_INT)
			n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_INT;
		if (yes)
			n |= CTRL_RXE | CTRL_RXR;
		else
			n &= ~(CTRL_RXE);
	}
	/* complete all the updates to ept->head before enabling endpoint*/
	dma_coherent_pre_ops();
	writel(n, USB_ENDPTCTRL(ept->num));
}
EXPORT_SYMBOL(usb_ept_enable);
+
/* Prime the endpoint with the request chain headed by ept->req.
 * Called with ui->lock held (see usb_ept_queue_xfer()); the head
 * request must not already be live on the hardware.
 */
static void usb_ept_start(struct usb_endpoint *ept)
{
	struct usb_info *ui = ept->ui;
	struct msm_request *req = ept->req;

	BUG_ON(req->live);

	/* link the hw queue head to the request's transaction item */
	ept->head->next = req->item_dma;
	ept->head->info = 0;

	/* memory barrier to flush the data before priming endpoint*/
	dma_coherent_pre_ops();
	/* start the endpoint */
	writel(1 << ept->bit, USB_ENDPTPRIME);

	/* mark this chain of requests as live */
	while (req) {
		req->live = 1;
		if (req->item->next == TERMINATE)
			break;
		req = req->next;
	}
}
+
/* Queue a transfer on an endpoint.  Maps the request buffer for DMA,
 * builds the hardware transaction item and either primes the endpoint
 * (empty queue) or chains the item behind the current tail.  Limited
 * to 16 KB (0x4000) per request.  Returns 0 or a negative error.
 */
int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *_req)
{
	unsigned long flags;
	struct msm_request *req = to_msm_request(_req);
	struct msm_request *last;
	struct usb_info *ui = ept->ui;
	struct ept_queue_item *item = req->item;
	unsigned length = req->req.length;

	if (length > 0x4000)
		return -EMSGSIZE;

	/* controller in low-power mode: wake the bus before queueing */
	if (ui->in_lpm) {
		req->req.status = usb_remote_wakeup();
		if (req->req.status) {
			pr_debug("%s:RWakeup generation failed, EP = %x\n",
							__func__, ept->bit);
			return req->req.status;
		}
	}

	spin_lock_irqsave(&ui->lock, flags);

	if (req->busy) {
		req->req.status = -EBUSY;
		spin_unlock_irqrestore(&ui->lock, flags);
		printk(KERN_INFO
		       "usb_ept_queue_xfer() tried to queue busy request\n");
		return -EBUSY;
	}

	/* only ep0 may be used while the device is not online */
	if (!ui->online && (ept->num != 0)) {
		req->req.status = -ENODEV;
		spin_unlock_irqrestore(&ui->lock, flags);
		printk(KERN_INFO "usb_ept_queue_xfer() tried to queue request"
				"while offline; ept->bit: %x\n", ept->bit);
		return -ENODEV;
	}

	req->busy = 1;
	req->live = 0;
	req->next = 0;
	req->req.status = -EBUSY;

	req->dma = dma_map_single(NULL, req->req.buf, length,
				  (ept->flags & EPT_FLAG_IN) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* prepare the transaction descriptor item for the hardware */
	item->next = TERMINATE;
	item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE;
	/* page pointers cover the up-to-16KB buffer in 4KB strides */
	item->page0 = req->dma;
	item->page1 = (req->dma + 0x1000) & 0xfffff000;
	item->page2 = (req->dma + 0x2000) & 0xfffff000;
	item->page3 = (req->dma + 0x3000) & 0xfffff000;

	/* Add the new request to the end of the queue */
	last = ept->last;
	if (last) {
		/* Already requests in the queue. add us to the
		 * end, but let the completion interrupt actually
		 * start things going, to avoid hw issues
		 */
		last->next = req;

		/* only modify the hw transaction next pointer if
		 * that request is not live
		 */
		if (!last->live)
			last->item->next = req->item_dma;
	} else {
		/* queue was empty -- kick the hardware */
		ept->req = req;
		usb_ept_start(ept);
	}
	ept->last = req;

	spin_unlock_irqrestore(&ui->lock, flags);
	return 0;
}
EXPORT_SYMBOL(usb_ept_queue_xfer);
+
/* Cancel every transaction pending on @ept: hardware flush followed by
 * software completion of the queued requests.  Always returns 0.
 */
int usb_ept_flush(struct usb_endpoint *ept)
{
	/* was a bare printk with no KERN_* level; this is trace output */
	pr_debug("%s\n", __func__);
	flush_endpoint(ept);
	return 0;
}
+
+int usb_ept_get_max_packet(struct usb_endpoint *ept)
+{
+	return ept->max_pkt;
+}
+EXPORT_SYMBOL(usb_ept_get_max_packet);
+
/* Signal remote wakeup to the host.
 *
 * Requires that the host enabled remote wakeup (SET_FEATURE) and that
 * the device is configured.  Brings the controller out of low-power
 * mode if needed, then forces a resume (PORTSC_FPR) unless the bus is
 * already active.  Returns 0 on success, -ENOTSUPP if the host did not
 * enable remote wakeup, -ENODEV if unconfigured or lpm exit failed.
 * Sleeps (cancel_work_sync); must not be called from atomic context.
 */
int usb_remote_wakeup(void)
{
	struct usb_info *ui = the_usb_info;
	unsigned long flags;

	spin_lock_irqsave(&ui->lock, flags);
	if (!ui->remote_wakeup) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_err("%s: remote wakeup not supported\n", __func__);
		return -ENOTSUPP;
	}

	if (!ui->online) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_err("%s: device is not configured\n", __func__);
		return -ENODEV;
	}

	if (ui->in_lpm)
		usb_lpm_exit(ui);
	spin_unlock_irqrestore(&ui->lock, flags);

	/* if usb_lpm_exit is unable to set PHCD,
	 * it would initiate workthread to set the PHCD
	 */
	if (cancel_work_sync(&ui->li.wakeup_phy))
		usb_lpm_wakeup_phy(NULL);

	/* re-check under the lock: lpm exit may have failed */
	spin_lock_irqsave(&ui->lock, flags);
	if (ui->in_lpm) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_err("%s: cannot bring controller out of lpm\n", __func__);
		return -ENODEV;
	}

	if (!usb_is_online(ui)) {
		pr_debug("%s: enabling force resume\n", __func__);
		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
	} else
		pr_debug("%s: controller seems to be out of suspend already\n",
				__func__);
	spin_unlock_irqrestore(&ui->lock, flags);

	return 0;
}
EXPORT_SYMBOL(usb_remote_wakeup);
+
+/* --- endpoint 0 handling --- */
+
+static void set_configuration(struct usb_info *ui, int yes)
+{
+	unsigned i;
+
+	ui->online = !!yes;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (!fi || !(ui->composition->functions & (1 << i)))
+			continue;
+		if (fi->func->configure)
+			fi->func->configure(yes, fi->func->context);
+	}
+}
+
+static void ep0out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	req->complete = 0;
+}
+
+static void ep0in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	/* queue up the receive of the ACK response from the host */
+	if (req->status == 0) {
+		struct usb_info *ui = ept->ui;
+		req->length = 0;
+		req->complete = ep0out_complete;
+		usb_ept_queue_xfer(&ui->ep0out, req);
+	}
+}
+
+static void ep0in_complete_sendzero(
+		struct usb_endpoint *ept, struct usb_request *req)
+{
+	if (req->status == 0) {
+		struct usb_info *ui = ept->ui;
+		req->length = 0;
+		req->complete = ep0in_complete;
+		usb_ept_queue_xfer(&ui->ep0in, req);
+	}
+}
+
+static void ep0_status_complete(
+		struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned int i;
+
+	if (!ui->test_mode)
+		return;
+
+	switch (ui->test_mode) {
+	case J_TEST:
+		pr_info("usb electrical test mode: (J)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_J_STATE, USB_PORTSC);
+		break;
+
+	case K_TEST:
+		pr_info("usb electrical test mode: (K)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_K_STATE, USB_PORTSC);
+		break;
+
+	case SE0_NAK_TEST:
+		pr_info("usb electrical test mode: (SE0-NAK)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_SE0_NAK, USB_PORTSC);
+		break;
+
+	case TST_PKT_TEST:
+		pr_info("usb electrical test mode: (TEST_PKT)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_TST_PKT, USB_PORTSC);
+		break;
+	default:
+		pr_err("usb:%s: undefined test mode: (%x)\n",
+				__func__, ui->test_mode);
+	}
+
+}
+
+static void ep0_setup_ack(struct usb_info *ui)
+{
+	struct usb_request *req = ui->setup_req;
+	req->length = 0;
+	req->complete = ep0_status_complete;
+	usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
/* Protocol-stall endpoint 0 by setting both stall bits in ENDPTCTRL0:
 * bit 16 stalls the TX (IN) direction, bit 0 stalls RX (OUT), so the
 * host sees a STALL handshake for the offending control transfer.
 */
static void ep0_setup_stall(struct usb_info *ui)
{
	writel((1<<16) | (1<<0), USB_ENDPTCTRL(0));
}
+
+static void ep0_setup_receive(struct usb_info *ui, unsigned len)
+{
+	ui->ep0out_req->length = len;
+	usb_ept_queue_xfer(&ui->ep0out, ui->ep0out_req);
+}
+
+static void ep0_setup_send(struct usb_info *ui, unsigned wlen)
+{
+	struct usb_request *req = ui->setup_req;
+	struct usb_endpoint *ept = &ui->ep0in;
+
+	/* never send more data than the host requested */
+	if (req->length > wlen)
+		req->length = wlen;
+
+	/* if we are sending a short response that ends on
+	 * a packet boundary, we'll need to send a zero length
+	 * packet as well.
+	 */
+	if ((req->length != wlen) && ((req->length & 63) == 0)) {
+		req->complete = ep0in_complete_sendzero;
+	} else {
+		req->complete = ep0in_complete;
+	}
+
+	usb_ept_queue_xfer(ept, req);
+}
+
+
+static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl,
+				struct usb_request *req);
+
+static void handle_setup(struct usb_info *ui)
+{
+	struct usb_ctrlrequest ctl;
+
+	memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
+	writel(EPT_RX(0), USB_ENDPTSETUPSTAT);
+
+	/* any pending ep0 transactions must be canceled */
+	flush_endpoint(&ui->ep0out);
+	flush_endpoint(&ui->ep0in);
+
+	/* let functions handle vendor and class requests */
+	if ((ctl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
+		struct usb_function *func;
+
+		/* Send stall if received interface number is invalid */
+		if (ctl.wIndex >= ui->next_ifc_num)
+			goto stall;
+
+		func = ui->func2ifc_map[ctl.wIndex];
+		if (func && func->setup) {
+			if (ctl.bRequestType & USB_DIR_IN) {
+				struct usb_request *req = ui->setup_req;
+				int ret = func->setup(&ctl,
+						req->buf, SETUP_BUF_SIZE,
+						func->context);
+				if (ret >= 0) {
+					req->length = ret;
+					ep0_setup_send(ui, ctl.wLength);
+					return;
+				}
+			} else {
+				int ret = func->setup(&ctl, NULL, 0,
+							func->context);
+				if (ret == 0) {
+					ep0_setup_ack(ui);
+					return;
+				} else if (ret > 0) {
+					ep0_setup_receive(ui, ret);
+					return;
+				}
+			}
+		}
+		goto stall;
+		return;
+	}
+
+	switch (ctl.bRequest) {
+	case USB_REQ_GET_STATUS:
+	{
+		struct usb_request *req = ui->setup_req;
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
+			break;
+		if (ctl.wLength != 2)
+			break;
+		req->length = 2;
+		switch (ctl.bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_ENDPOINT:
+		{
+			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+			struct usb_endpoint *ept;
+
+			if (num == 0)
+				break;
+			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+				num += 16;
+			ept = ui->ept + num;
+			memcpy(req->buf, &ept->ept_halted, 2);
+			break;
+		}
+
+		case USB_RECIP_DEVICE:
+		{
+			unsigned short temp = 0;
+			if (usb_msm_get_selfpowered())
+				temp = 1 << USB_DEVICE_SELF_POWERED;
+			if (usb_msm_get_remotewakeup())
+				temp |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+			memcpy(req->buf, &temp, 2);
+			break;
+		}
+
+		case USB_RECIP_INTERFACE:
+			memset(req->buf, 0, 2);
+			break;
+		default:
+			printk(KERN_ERR "Unreconginized recipient\n");
+			break;
+		}
+
+		ep0_setup_send(ui, 2);
+		return;
+	}
+
+	case USB_REQ_GET_DESCRIPTOR:
+	{
+		struct usb_request *req;
+
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
+			break;
+
+		req = ui->setup_req;
+		if (!usb_find_descriptor(ui, &ctl, req)) {
+			if (req->length > ctl.wLength)
+				req->length = ctl.wLength;
+			ep0_setup_send(ui, ctl.wLength);
+			return;
+		}
+		break;
+	}
+
+	case USB_REQ_SET_FEATURE:
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
+			break;
+		if (ctl.wLength != 0)
+			break;
+		switch (ctl.bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_DEVICE:
+			if (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP) {
+				ui->remote_wakeup = 1;
+				ep0_setup_ack(ui);
+				return;
+			} else if (ctl.wValue == USB_DEVICE_TEST_MODE) {
+				if (ctl.wIndex & 0x0f)
+					break;
+				ui->test_mode = ctl.wIndex;
+				ep0_setup_ack(ui);
+				return;
+			}
+			break;
+
+		case USB_RECIP_ENDPOINT:
+		{
+			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+			if ((num == 0) || (ctl.wValue != 0))
+				break;
+			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+				num += 16;
+			usb_ept_set_halt(ui->ept + num);
+			ep0_setup_ack(ui);
+			return;
+		}
+
+		default:
+			pr_err("usb: %s: set_feature: unrecognized recipient\n",
+					__func__);
+			break;
+		}
+		break;
+
+	case USB_REQ_CLEAR_FEATURE:
+	{
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
+			break;
+		if (ctl.wLength != 0)
+			break;
+
+		switch (ctl.bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_DEVICE:
+			if (ctl.wValue != USB_DEVICE_REMOTE_WAKEUP)
+				break;
+			ui->remote_wakeup = 0;
+			ep0_setup_ack(ui);
+			return;
+		case USB_RECIP_ENDPOINT:
+		{
+			unsigned num;
+			if (ctl.wValue != USB_ENDPOINT_HALT)
+				break;
+			num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+			if (num != 0) {
+				if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+					num += 16;
+				usb_ept_clear_halt(ui->ept + num);
+			}
+			ep0_setup_ack(ui);
+			return;
+		}
+		default:
+			pr_info("unsupported clear feature command\n");
+			pr_info("Request-type:(%08x) wValue:(%08x) "
+					"wIndex:(%08x) wLength:(%08x)\n",
+						ctl.bRequestType, ctl.wValue,
+						ctl.wIndex, ctl.wLength);
+			break;
+		}
+		break;
+	}
+
+	case USB_REQ_SET_INTERFACE:
+		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
+			!= (USB_DIR_OUT | USB_RECIP_INTERFACE))
+			break;
+		if (ui->func2ifc_map[ctl.wIndex]->set_interface) {
+			ui->func2ifc_map[ctl.wIndex]->set_interface(ctl.wIndex,
+					ctl.wValue,
+					ui->func2ifc_map[ctl.wIndex]->context);
+			ep0_setup_ack(ui);
+			return;
+		}
+		break;
+	case USB_REQ_GET_INTERFACE:
+		{
+		struct usb_function *f;
+		struct usb_request *req = ui->setup_req;
+		int ifc_num = ctl.wIndex;
+		int ret = 0;
+
+		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
+					!= (USB_DIR_IN | USB_RECIP_INTERFACE))
+			break;
+
+		f = ui->func2ifc_map[ifc_num];
+		if (!f->get_interface)
+			break;
+		ret = f->get_interface(ifc_num,
+				ui->func2ifc_map[ifc_num]->context);
+		if (ret < 0)
+			break;
+		req->length = ctl.wLength;
+		memcpy(req->buf, &ret, req->length);
+		ep0_setup_send(ui, ctl.wLength);
+		return;
+		}
+	case USB_REQ_SET_CONFIGURATION:
+		if ((ctl.bRequestType & USB_DIR_MASK) != USB_DIR_OUT)
+			break;
+		ui->configured = ctl.wValue;
+		pr_info("hsusb set_configuration wValue = %d usbcmd = %x\n",
+						ctl.wValue, readl(USB_USBCMD));
+		set_configuration(ui, ctl.wValue);
+		ep0_setup_ack(ui);
+		ui->flags = USB_FLAG_CONFIGURE;
+		if (ui->configured)
+			ui->usb_state = USB_STATE_CONFIGURED;
+		queue_delayed_work(usb_work, &ui->work, 0);
+		return;
+
+	case USB_REQ_GET_CONFIGURATION:
+	{
+		unsigned conf;
+		struct usb_request *req = ui->setup_req;
+		req->length = 1;
+		conf = ui->configured;
+		memcpy(req->buf, &conf, req->length);
+		ep0_setup_send(ui, ctl.wLength);
+		return;
+	}
+
+	case USB_REQ_SET_ADDRESS:
+		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
+			!= (USB_DIR_OUT | USB_RECIP_DEVICE))
+			break;
+		ui->usb_state = USB_STATE_ADDRESS;
+		writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
+		ep0_setup_ack(ui);
+		return;
+	}
+
+stall:
+	ep0_setup_stall(ui);
+	return;
+
+}
+
+static void handle_endpoint(struct usb_info *ui, unsigned bit)
+{
+	struct usb_endpoint *ept = ui->ept + bit;
+	struct msm_request *req;
+	unsigned long flags;
+	unsigned info;
+
+#if 0
+	printk(KERN_INFO "handle_endpoint() %d %s req=%p(%08x)\n",
+	       ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+	       ept->req, ept->req ? ept->req->item_dma : 0);
+#endif
+	if (!ept) {
+		pr_err("%s: ept is null: ep bit = %d\n", __func__, bit);
+		return;
+	}
+
+	/* expire all requests that are no longer active */
+	spin_lock_irqsave(&ui->lock, flags);
+	while ((req = ept->req)) {
+		/* clean speculative fetches on req->item->info */
+		dma_coherent_post_ops();
+		info = req->item->info;
+
+		/* if we've processed all live requests, time to
+		 * restart the hardware on the next non-live request
+		 */
+		if (!req->live) {
+			usb_ept_start(ept);
+			break;
+		}
+
+		/* if the transaction is still in-flight, stop here */
+		if (info & INFO_ACTIVE)
+			break;
+
+		/* advance ept queue to the next request */
+		ept->req = req->next;
+		if (ept->req == 0)
+			ept->last = 0;
+
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
+			/* XXX pass on more specific error code */
+			req->req.status = -EIO;
+			req->req.actual = 0;
+			printk(KERN_INFO "hsusb: ept %d %s error. info=%08x\n",
+				ept->num,
+				(ept->flags & EPT_FLAG_IN) ? "in" : "out",
+			       info);
+		} else {
+			req->req.status = 0;
+			req->req.actual = req->req.length - ((info >> 16) & 0x7FFF);
+		}
+		req->busy = 0;
+		req->live = 0;
+		if (req->dead)
+			do_free_req(ui, req);
+
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(ept, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
/* Flush (cancel) the hardware transactions for the endpoints selected
 * by @bits, retrying until ENDPTSTAT reports them idle.  Skipped when
 * the controller is in low-power mode (registers are inaccessible).
 *
 * NOTE(review): both loops are unbounded; if the hardware never clears
 * the flush/status bits this spins forever in udelay — consider a
 * bounded retry with a warning.
 */
static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
{
	/* flush endpoint, canceling transactions
	** - this can take a "large amount of time" (per databook)
	** - the flush can fail in some cases, thus we check STAT
	**   and repeat if we're still operating
	**   (does the fact that this doesn't use the tripwire matter?!)
	*/

	if (ui->in_lpm) {
		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
		return;
	}

	do {
		writel(bits, USB_ENDPTFLUSH);
		while (readl(USB_ENDPTFLUSH) & bits)
			udelay(100);
	} while (readl(USB_ENDPTSTAT) & bits);
}
+
+static void flush_endpoint_sw(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req, *next;
+	unsigned long flags;
+
+	/* inactive endpoints have nothing to do here */
+	if (!ui || !ept->alloced || !ept->max_pkt)
+		return;
+
+	/* put the queue head in a sane state */
+	ept->head->info = 0;
+	ept->head->next = TERMINATE;
+
+	/* cancel any pending requests */
+	spin_lock_irqsave(&ui->lock, flags);
+	req = ept->req;
+	ept->req = 0;
+	ept->last = 0;
+	while (req != 0) {
+		next = req->next;
+
+		req->busy = 0;
+		req->live = 0;
+		req->req.status = -ENODEV;
+		req->req.actual = 0;
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(ept, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+		if (req->dead)
+			do_free_req(ui, req);
+		req = req->next;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint(struct usb_endpoint *ept)
+{
+	if (!ept->ui)
+		return;
+
+	flush_endpoint_hw(ept->ui, (1 << ept->bit));
+	flush_endpoint_sw(ept);
+}
+
+static void flush_all_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	flush_endpoint_hw(ui, 0xffffffff);
+
+	for (n = 0; n < 32; n++)
+		flush_endpoint_sw(ui->ept + n);
+}
+
/* NOTE(review): neither constant is referenced in this part of the
 * file — confirm they are used elsewhere before removing. */
#define HW_DELAY_FOR_LPM msecs_to_jiffies(1000)
#define DELAY_FOR_USB_VBUS_STABILIZE msecs_to_jiffies(500)

/* Top-level USB interrupt handler.
 *
 * Acknowledges and dispatches: port change (resume / speed detect),
 * bus reset, bus suspend, transfer completions (including ep0 SETUP),
 * and OTG B-session-valid changes (cable attach/detach).  Heavyweight
 * work is deferred to the ui->work workqueue via ui->flags.
 */
static irqreturn_t usb_interrupt(int irq, void *data)
{
	struct usb_info *ui = data;
	unsigned n;
	unsigned speed;

	if (!ui->active)
		return IRQ_HANDLED;

	/* an interrupt while in lpm is a wakeup event; exit lpm first */
	if (ui->in_lpm) {
		usb_lpm_exit(ui);
		return IRQ_HANDLED;
	}

	/* read and ack the pending status bits (write-1-to-clear) */
	n = readl(USB_USBSTS);
	writel(n, USB_USBSTS);

	/* somehow we got an IRQ while in the reset sequence: ignore it */
	if (ui->running == 0) {
		pr_err("%s: ui->running is zero\n", __func__);
		return IRQ_HANDLED;
	}

	if (n & STS_PCI) {
		if (!(readl(USB_PORTSC) & PORTSC_PORT_RESET)) {
			speed = (readl(USB_PORTSC) & PORTSC_PORT_SPEED_MASK);
			switch (speed) {
			case PORTSC_PORT_SPEED_HIGH:
				pr_info("hsusb resume: speed = HIGH\n");
				ui->speed = USB_SPEED_HIGH;
				break;

			case PORTSC_PORT_SPEED_FULL:
				pr_info("hsusb resume: speed = FULL\n");
				ui->speed = USB_SPEED_FULL;
				break;

			default:
				pr_err("hsusb resume: Unknown Speed\n");
				ui->speed = USB_SPEED_UNKNOWN;
				break;
			}
		}

		/* pci interrutpt would also be generated when resuming
		 * from bus suspend, following check would avoid kick
		 * starting usb main thread in case of pci interrupts
		 * during enumeration
		 */
		if (ui->configured && ui->chg_type == USB_CHG_TYPE__SDP) {
			ui->usb_state = USB_STATE_CONFIGURED;
			ui->flags = USB_FLAG_RESUME;
			queue_delayed_work(usb_work, &ui->work, 0);
		}
	}

	if (n & STS_URI) {
		pr_info("hsusb reset interrupt\n");
		ui->usb_state = USB_STATE_DEFAULT;
		ui->configured = 0;
		schedule_work(&ui->chg_stop);

		/* clear setup/complete status and flush all endpoints */
		writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
		writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
		writel(0xffffffff, USB_ENDPTFLUSH);
		writel(0, USB_ENDPTCTRL(1));

		if (ui->online != 0) {
			/* marking us offline will cause ept queue attempts to fail */
			ui->online = 0;

			flush_all_endpoints(ui);

			/* XXX: we can't seem to detect going offline, so deconfigure
			 * XXX: on reset for the time being
			 */
			set_configuration(ui, 0);
		}
	}

	if (n & STS_SLI) {
		pr_info("hsusb suspend interrupt\n");
		ui->usb_state = USB_STATE_SUSPENDED;

		/* stop usb charging */
		schedule_work(&ui->chg_stop);
	}

	if (n & STS_UI) {
		/* SETUP on ep0 OUT takes priority over data completions */
		n = readl(USB_ENDPTSETUPSTAT);
		if (n & EPT_RX(0))
			handle_setup(ui);

		/* ack and service each completed endpoint bit */
		n = readl(USB_ENDPTCOMPLETE);
		writel(n, USB_ENDPTCOMPLETE);
		while (n) {
			unsigned bit = __ffs(n);
			handle_endpoint(ui, bit);
			n = n & (~(1 << bit));
		}
	}

	/* OTG status is ack'ed the same write-1-to-clear way */
	n = readl(USB_OTGSC);
	writel(n, USB_OTGSC);

	if (n & OTGSC_BSVIS) {
		/*Verify B Session Valid Bit to verify vbus status*/
		if (B_SESSION_VALID & n)	{
			pr_info("usb cable connected\n");
			ui->usb_state = USB_STATE_POWERED;
			ui->flags = USB_FLAG_VBUS_ONLINE;
			/* Wait for 100ms to stabilize VBUS before initializing
			 * USB and detecting charger type
			 */
			queue_delayed_work(usb_work, &ui->work, 0);
		} else {
			int i;

			usb_disable_pullup(ui);

			printk(KERN_INFO "usb cable disconnected\n");
			ui->usb_state = USB_STATE_NOTATTACHED;
			ui->configured = 0;
			/* notify every enabled function of the disconnect */
			for (i = 0; i < ui->num_funcs; i++) {
				struct usb_function_info *fi = ui->func[i];
				if (!fi ||
				!(ui->composition->functions & (1 << i)))
					continue;
				if (fi->func->disconnect)
					fi->func->disconnect
						(fi->func->context);
			}
			ui->flags = USB_FLAG_VBUS_OFFLINE;
			queue_delayed_work(usb_work, &ui->work, 0);
		}
	}

	return IRQ_HANDLED;
}
+
/* One-time software setup of the controller state.
 *
 * Clears the queue-head memory and endpoint table, initializes the two
 * halves of endpoint 0 (64-byte max packet for ep0 at any speed),
 * allocates the shared ep0 requests, and wires up the work items used
 * by the interrupt handler and charger logic.
 */
static void usb_prepare(struct usb_info *ui)
{
	memset(ui->buf, 0, 4096);
	ui->head = (void *) (ui->buf + 0);

	/* only important for reset/reinit */
	memset(ui->ept, 0, sizeof(ui->ept));
	ui->next_item = 0;
	ui->speed = USB_SPEED_UNKNOWN;

	init_endpoints(ui);

	ui->ep0in.max_pkt = 64;
	ui->ep0in.ui = ui;
	ui->ep0in.alloced = 1;
	ui->ep0out.max_pkt = 64;
	ui->ep0out.ui = ui;
	ui->ep0out.alloced = 1;

	/* shared requests for the ep0 control protocol */
	ui->setup_req = usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE);
	ui->ep0out_req = usb_ept_alloc_req(&ui->ep0out, ui->ep0out.max_pkt);

	INIT_WORK(&ui->chg_stop, usb_chg_stop);
	INIT_WORK(&ui->li.wakeup_phy, usb_lpm_wakeup_phy);
	INIT_DELAYED_WORK(&ui->work, usb_do_work);
	INIT_DELAYED_WORK(&ui->chg_legacy_det, usb_chg_legacy_detect);
}
+
+static int usb_is_online(struct usb_info *ui)
+{
+	/* continue lpm if bus is suspended or disconnected or stopped*/
+	if (((readl(USB_PORTSC) & PORTSC_SUSP) == PORTSC_SUSP) ||
+			((readl(USB_PORTSC) & PORTSC_CCS) == 0) ||
+			((readl(USB_USBCMD) & USBCMD_RS) == 0))
+		return 0;
+
+	pr_debug("usb is online\n");
+	pr_debug("usbcmd:(%08x) usbsts:(%08x) portsc:(%08x)\n",
+			readl(USB_USBCMD),
+			readl(USB_USBSTS),
+			readl(USB_PORTSC));
+	return -1;
+}
+
/* Bring the PHY out of low-power mode by clearing PORTSC_PHCD.
 * Returns 0 on success, -1 if the bit would not clear.
 */
static int usb_wakeup_phy(struct usb_info *ui)
{
	int i;

	/* stop forcing the ULPI STP signal before touching the PHY */
	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);

	/* some circuits automatically clear PHCD bit */
	for (i = 0; i < 5 && (readl(USB_PORTSC) & PORTSC_PHCD); i++) {
		writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
		msleep(1);
	}

	if ((readl(USB_PORTSC) & PORTSC_PHCD)) {
		pr_err("%s: cannot clear phcd bit\n", __func__);
		return -1;
	}

	return 0;
}
+
/* Put the PHY into low-power mode by setting PORTSC_PHCD.
 *
 * Aborts (returns -1) if the bus is still active.  For the integrated
 * PHY, OTG comparators are turned on/off over ULPI depending on cable
 * state.  Setting PHCD can race with bus activity, so it is retried
 * under the lock with an online re-check each iteration; on persistent
 * failure the link and PHY are reset to recover.  Returns 0 once PHCD
 * sticks and the STP signal is blocked.  Sleeps; not atomic-safe.
 */
static int usb_suspend_phy(struct usb_info *ui)
{
	int i;
	unsigned long flags;

	if (usb_is_online(ui))
		return -1;

	/* spec talks about following bits in LPM for external phy.
	 * But they are ignored because
	 * 1. disabling interface protection circuit: by disabling
	 * interface protection circuit we cannot come out
	 * of lpm as async interrupts would be disabled
	 * 2. setting the suspendM bit: this bit would be set by usb
	 * controller once we set phcd bit.
	 */
	switch (PHY_TYPE(ui->phy_info)) {
	case USB_PHY_INTEGRATED:
		/* NOTE(review): read of reg 0x14 discards its result —
		 * presumably the read itself has a side effect on the
		 * older PHY; confirm against the PHY errata */
		if (!is_phy_45nm())
			ulpi_read(ui, 0x14);

		/* turn on/off otg comparators */
		if (ui->vbus_sn_notif &&
			ui->usb_state == USB_STATE_NOTATTACHED)
			ulpi_write(ui, 0x00, 0x30);
		else
			ulpi_write(ui, 0x01, 0x30);

		if (!is_phy_45nm())
			ulpi_write(ui, 0x08, 0x09);

		break;

	case USB_PHY_UNDEFINED:
		pr_err("%s: undefined phy type\n", __func__);
		return -1;
	}

	/* loop for large amount of time */
	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(&ui->lock, flags);
		if (usb_is_online(ui)) {
			spin_unlock_irqrestore(&ui->lock, flags);
			return -1;
		}
		/* set phy to be in lpm */
		writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
		spin_unlock_irqrestore(&ui->lock, flags);

		msleep(1);
		if (readl(USB_PORTSC) & PORTSC_PHCD)
			goto blk_stp_sig;
	}

	if (!(readl(USB_PORTSC) & PORTSC_PHCD)) {
		pr_err("unable to set phcd of portsc reg\n");
		pr_err("Reset HW link and phy to recover from phcd error\n");
		usb_hw_reset(ui);
		return -1;
	}

	/* we have to set this bit again to work-around h/w bug */
	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);

blk_stp_sig:
	/* block the stop signal */
	writel(readl(USB_USBCMD) | ULPI_STP_CTRL, USB_USBCMD);

	return 0;
}
+
/* SW workarounds applied during hardware reset:
 *
 * Issue#2  - Integrated PHY Calibration
 * Symptom  - Electrical compliance failure in eye-diagram tests
 * Fix      - Raise the PHY amplitude to 400mV
 *
 * Issue#3  - AHB Posted Writes
 * Symptom  - USB stability
 * Fix      - Program transactor ON, BURST disabled, and
 *            unspecified-length INCR burst enabled
 */

/* Full reset of the USB link and PHY.
 *
 * Wakes/resets the PHY, resets the link controller, selects device
 * mode with stream-disable, applies the per-silicon AHB/amplitude
 * workarounds above, programs the ULPI interrupt masks (with OTG
 * comparator handling depending on whether a transceiver is present),
 * and points the controller at the endpoint queue-head list.
 * Returns 0.  Sleeps; the controller clock is held only for the
 * duration of the function.
 */
static int usb_hw_reset(struct usb_info *ui)
{
	unsigned i;
	struct msm_hsusb_platform_data *pdata;
	unsigned long timeout;
	unsigned val = 0;

	pdata = ui->pdev->dev.platform_data;

	clk_enable(ui->clk);
	/* reset the phy before resetting link */
	if (readl(USB_PORTSC) & PORTSC_PHCD)
		usb_wakeup_phy(ui);
	/* rpc call for phy_reset */
	if (ui->pdata->phy_reset)
		ui->pdata->phy_reset(ui->addr);
	else
		msm_hsusb_phy_reset();
	/* Give some delay to settle phy after reset */
	msleep(100);

	/* RESET */
	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	while (readl(USB_USBCMD) & USBCMD_RESET) {
		if (time_after(jiffies, timeout)) {
			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
			break;
		}
		msleep(1);
	}

	/* select DEVICE mode with SDIS active */
	writel((USBMODE_SDIS | USBMODE_DEVICE), USB_USBMODE);
	msleep(1);

	/* select ULPI phy */
	i = (readl(USB_PORTSC) & ~PORTSC_PTS);
	writel(i | PORTSC_PTS_ULPI, USB_PORTSC);
	/* set usb controller interrupt latency to zero*/
	writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
							USB_USBCMD);

	/* If the target is 7x01 and roc version is > 1.2, set
	 * the AHB mode to 2 for maximum performance, else set
	 * it to 1, to bypass the AHB transactor for stability.
	 */
	if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
		if (pdata->soc_version >= SOC_ROC_2_0)
			writel(0x02, USB_ROC_AHB_MODE);
		else
			writel(0x01, USB_ROC_AHB_MODE);
	} else {
		unsigned cfg_val;

		/* Raise  amplitude to 400mV
		 * SW workaround, Issue#2
		 */
		cfg_val = ulpi_read(ui, ULPI_CONFIG_REG);
		cfg_val |= ULPI_AMPLITUDE_MAX;
		ulpi_write(ui, cfg_val, ULPI_CONFIG_REG);

		/* SW workaround, Issue#3 */
		writel(0x0, USB_AHB_BURST);
		writel(0x00, USB_AHB_MODE);
	}

	/* TBD: do we have to add DpRise, ChargerRise and
	 * IdFloatRise for 45nm
	 */
	/* Disable VbusValid and SessionEnd comparators */
	val = ULPI_VBUS_VALID | ULPI_SESS_END;

	/* enable id interrupt only when transceiver is available */
	if (ui->xceiv)
		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE, USB_OTGSC);
	else {
		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
							USB_OTGSC);
		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
	}
	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
	ulpi_write(ui, val, ULPI_INT_FALL_CLR);

	/* we are just setting the pointer in the hwblock. Since the
	 * endpoint isnt enabled the hw block doenst read the contents
	 * of ui->dma - so we dont need a barrier here
	 * */
	writel(ui->dma, USB_ENDPOINTLISTADDR);

	clk_disable(ui->clk);

	return 0;
}
+
/* Software-level controller reset.
 *
 * Marks the driver as not running (so the interrupt handler ignores
 * IRQs raised mid-reset), performs the hardware reset, re-binds the
 * ep0 endpoints, takes the device offline, cancels every pending
 * request, deconfigures the function drivers, and finally re-enables
 * interrupt processing.
 */
static void usb_reset(struct usb_info *ui)
{
	unsigned long flags;

	spin_lock_irqsave(&ui->lock, flags);
	ui->running = 0;
	spin_unlock_irqrestore(&ui->lock, flags);

#if 0
	/* we should flush and shutdown cleanly if already running */
	writel(0xffffffff, USB_ENDPTFLUSH);
	msleep(2);
#endif

	if (usb_hw_reset(ui)) {
		pr_info("%s: h/w reset failed\n", __func__);
		return;
	}

	usb_configure_endpoint(&ui->ep0in, NULL);
	usb_configure_endpoint(&ui->ep0out, NULL);

	/* marking us offline will cause ept queue attempts to fail */
	ui->online = 0;

	/* terminate any pending transactions */
	flush_all_endpoints(ui);

	set_configuration(ui, 0);

	spin_lock_irqsave(&ui->lock, flags);
	ui->running = 1;
	spin_unlock_irqrestore(&ui->lock, flags);
}
+
/* OTG callback: enable or disable the peripheral controller.
 *
 * Enable schedules a full reset and runs the state machine directly;
 * disable deactivates the controller, gates the clocks, and releases
 * the suspend wakelocks.  The lock is dropped before calling out.
 */
static void usb_enable(void *handle, int enable)
{
	struct usb_info *ui = handle;
	unsigned long flags;
	spin_lock_irqsave(&ui->lock, flags);

	if (enable) {
		ui->flags |= USB_FLAG_RESET;
		ui->active = 1;
		spin_unlock_irqrestore(&ui->lock, flags);
		/* run the work function synchronously, not via the queue */
		usb_do_work(&ui->work.work);
	} else {
		ui->active = 0;
		spin_unlock_irqrestore(&ui->lock, flags);
		usb_clk_disable(ui);
		msm_hsusb_suspend_locks_acquire(ui, 0);
	}
}
+
/* Callbacks handed to the OTG driver; it enables/disables the
 * peripheral controller through usb_enable(). */
static struct msm_otg_ops dcd_ops = {
	.request = usb_enable,
};
+
/* Start the USB stack once all composition functions have registered.
 *
 * Binds every enabled function driver, then either registers with the
 * OTG transceiver (if one exists) or sets up the PMIC RPC path for
 * VBUS session notifications and kicks the state machine directly.
 * Note the RPC failure paths fall through to "out" and still start
 * the controller, just without VBUS notification support.
 */
void usb_start(struct usb_info *ui)
{
	int i, ret;

	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];
		if (!fi || !(ui->composition->functions & (1<<i)))
			continue;
		if (fi->enabled) {
			pr_info("usb_bind_func() (%s)\n", fi->func->name);
			fi->func->bind(fi->func->context);
		}
	}

	ui->clk_enabled = 0;
	ui->vreg_enabled = 0;

	ui->xceiv = msm_otg_get_transceiver();
	if (ui->xceiv) {
		/* OTG present: defer start to the work thread */
		ui->flags = USB_FLAG_REG_OTG;
		queue_delayed_work(usb_work, &ui->work, 0);
	} else {
		/*Initialize pm app RPC */
		ret = msm_pm_app_rpc_init();
		if (ret) {
			pr_err("%s: pm_app_rpc connect failed\n", __func__);
			goto out;
		}
		pr_info("%s: pm_app_rpc connect success\n", __func__);

		ret = msm_pm_app_register_vbus_sn(&msm_hsusb_set_vbus_state);
		if (ret) {
			pr_err("%s:PMIC VBUS SN notif not supported\n", \
					__func__);
			msm_pm_app_rpc_deinit();
			goto out;
		}
		pr_info("%s:PMIC VBUS SN notif supported\n", \
					__func__);

		ret = msm_pm_app_enable_usb_ldo(1);
		if (ret) {
			pr_err("%s: unable to turn on internal LDO", \
					__func__);
			msm_pm_app_unregister_vbus_sn(
					&msm_hsusb_set_vbus_state);
			msm_pm_app_rpc_deinit();
			goto out;
		}
		ui->vbus_sn_notif = 1;
out:
		/* start even when VBUS notification setup failed */
		ui->active = 1;
		ui->flags |= (USB_FLAG_START | USB_FLAG_RESET);
		queue_delayed_work(usb_work, &ui->work, 0);
	}

}
+
/* Registry of function drivers added by usb_function_register();
 * additions are serialized by usb_function_list_lock. */
static LIST_HEAD(usb_function_list);
static DEFINE_MUTEX(usb_function_list_lock);
+
+
+static struct usb_function_info *usb_find_function(const char *name)
+{
+	struct list_head *entry;
+	list_for_each(entry, &usb_function_list) {
+		struct usb_function_info *fi =
+			list_entry(entry, struct usb_function_info, list);
+		if (fi) {
+			if (!strcmp(name, fi->func->name))
+				return fi;
+		}
+	}
+
+	return NULL;
+}
+
+static void usb_try_to_bind(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long enabled_functions = 0;
+	int i;
+
+	if (!ui || ui->bound || !ui->pdev || !ui->composition)
+		return;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		if (ui->func[i])
+			enabled_functions |= (1 << i);
+	}
+	if ((enabled_functions & ui->composition->functions)
+					!= ui->composition->functions)
+		return;
+
+	usb_set_composition(ui->composition->product_id);
+	usb_configure_device_descriptor(ui);
+
+	/* we have found all the needed functions */
+	ui->bound = 1;
+	printk(KERN_INFO "msm_hsusb: functions bound. starting.\n");
+	usb_start(ui);
+}
+
+static int usb_get_function_index(const char *name)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		if (!strcmp(name, ui->functions_map[i].name))
+			return i;
+	}
+	return -1;
+}
+
/* Register a function driver with the controller.
 *
 * Validates the name against the platform's function map, records the
 * driver, hands it the shared ep0 endpoints/requests, and attempts to
 * bind the composition (which starts the stack once all functions are
 * present).  Returns 0, -EINVAL for an unknown name, or -ENOMEM.
 */
int usb_function_register(struct usb_function *driver)
{
	struct usb_info *ui = the_usb_info;
	struct usb_function_info *fi;
	int ret = 0;
	int index;

	mutex_lock(&usb_function_list_lock);

	index = usb_get_function_index(driver->name);
	if (index < 0) {
		pr_err("%s: unsupported function = %s\n",
				__func__, driver->name);
		ret = -EINVAL;
		goto fail;
	}

	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
	if (!fi) {
		ret = -ENOMEM;
		goto fail;
	}

	fi->func = driver;
	list_add(&fi->list, &usb_function_list);
	ui->func[index] = fi;
	/* give the function access to the shared ep0 machinery */
	fi->func->ep0_out_req = ui->ep0out_req;
	fi->func->ep0_in_req = ui->setup_req;
	fi->func->ep0_out = &ui->ep0out;
	fi->func->ep0_in = &ui->ep0in;
	pr_info("%s: name = '%s',  map = %d\n", __func__, driver->name, index);

	usb_try_to_bind();
fail:
	mutex_unlock(&usb_function_list_lock);
	return ret;
}
EXPORT_SYMBOL(usb_function_register);
+
+static unsigned short usb_validate_product_id(unsigned short pid)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!ui || !ui->pdata)
+		return -1;
+
+	/* set idProduct based on which functions are enabled */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].product_id == pid)
+			break;
+	}
+
+	if (i < ui->pdata->num_compositions) {
+		struct usb_composition *comp = &ui->pdata->compositions[i];
+		for (i = 0; i < ui->num_funcs; i++) {
+			if (comp->functions & (1 << i)) {
+				if (!ui->func[i]) {
+					pr_err("%s: func(%d) not available\n",
+								__func__, i);
+					return 0;
+				}
+			}
+		}
+		return comp->product_id;
+	} else
+		pr_err("%s: Product id (%x) is not supported\n", __func__, pid);
+	return 0;
+}
+
+static unsigned short usb_get_product_id(unsigned long enabled_functions)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!(ui && ui->pdata))
+		return -1;
+
+	/* set idProduct based on which functions are enabled */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].functions == enabled_functions)
+			return ui->pdata->compositions[i].product_id;
+	}
+	return 0;
+}
+
/* Release the allocated string descriptors and reset the interface
 * number allocator for the next composition.
 *
 * NOTE(review): the counter is reset to 1, implying slot 0 is a
 * reserved (static?) descriptor — yet the loop frees from index 0.
 * Confirm strdesc[0] is really heap-allocated before freeing it here.
 */
static void usb_uninit(struct usb_info *ui)
{
	int i;

	for (i = 0; i < ui->strdesc_index; i++)
		kfree(ui->strdesc[i]);
	ui->strdesc_index = 1;
	ui->next_ifc_num = 0;
}
+
+static unsigned short usb_set_composition(unsigned short pid)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!(ui && ui->pdata))
+		return 0;
+
+	/* Retrieve product id on enabled functions */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].product_id == pid) {
+			ui->composition = &ui->pdata->compositions[i];
+			for (i = 0; i < ui->num_funcs; i++) {
+				struct usb_function_info *fi = ui->func[i];
+				if (ui->func && fi && fi->func) {
+					fi->enabled = (ui->composition->
+							functions >> i) & 1;
+				}
+			}
+			pr_info("%s: composition set to product id = %x\n",
+				__func__, ui->composition->product_id);
+			return ui->composition->product_id;
+		}
+	}
+	pr_err("%s: product id (%x) not supported\n", __func__, pid);
+	return 0;
+}
+
/*
 * usb_switch_composition() - tear down the active composition and bring
 * up the one identified by @pid.
 *
 * When the controller is running:
 *   1. cancel pending state-machine work and exit LPM (registers are not
 *      accessible while the PHY is in low power mode),
 *   2. mask USB/session-valid interrupts, drop the D+ pullup, and wait
 *      300ms so the host observes the soft disconnect,
 *   3. unconfigure/unbind all currently-enabled functions,
 *   4. select the new composition, rebuild descriptors, bind the new
 *      composition's functions, and restart via the work queue with
 *      USB_FLAG_RESET.
 *
 * Does nothing if the controller is inactive or @pid fails validation.
 */
static void usb_switch_composition(unsigned short pid)
{
	struct usb_info *ui = the_usb_info;
	int i;
	unsigned long flags;


	if (!ui->active)
		return;
	if (!usb_validate_product_id(pid))
		return;

	disable_irq(ui->irq);
	if (cancel_delayed_work_sync(&ui->work))
		pr_info("%s: Removed work successfully\n", __func__);
	if (ui->running) {
		/* mark ourselves stopped/offline/unbound under the lock */
		spin_lock_irqsave(&ui->lock, flags);
		ui->running = 0;
		ui->online = 0;
		ui->bound = 0;
		spin_unlock_irqrestore(&ui->lock, flags);
		/* we should come out of lpm to access registers */
		if (ui->in_lpm) {
			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
				disable_irq(ui->gpio_irq[0]);
				disable_irq(ui->gpio_irq[1]);
			}

			if (ui->usb_state == USB_STATE_NOTATTACHED
						&& ui->vbus_sn_notif)
				msm_pm_app_enable_usb_ldo(1);

			usb_lpm_exit(ui);
			/* if the deferred PHY wakeup never ran, do it now */
			if (cancel_work_sync(&ui->li.wakeup_phy))
				usb_lpm_wakeup_phy(NULL);
			ui->in_lpm = 0;
		}
		/* disable usb and session valid interrupts */
		writel(0, USB_USBINTR);
		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);

		/* stop the controller */
		usb_disable_pullup(ui);
		ui->usb_state = USB_STATE_NOTATTACHED;
		switch_set_state(&ui->sdev, 0);
		/* Before starting again, wait for 300ms
		 * to make sure host detects soft disconnection
		 **/
		msleep(300);
	}

	/* unconfigure and unbind every enabled function of the old set */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];
		if (!fi || !fi->func || !fi->enabled)
			continue;
		if (fi->func->configure)
			fi->func->configure(0, fi->func->context);
		if (fi->func->unbind)
			fi->func->unbind(fi->func->context);
	}

	usb_uninit(ui);
	usb_set_composition(pid);
	usb_configure_device_descriptor(ui);

	/* initialize functions */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];
		if (!fi || !(ui->composition->functions & (1 << i)))
			continue;
		if (fi->enabled) {
			if (fi->func->bind)
				fi->func->bind(fi->func->context);
		}
	}

	/* restart the controller through the state machine */
	ui->bound = 1;
	ui->flags = USB_FLAG_RESET;
	queue_delayed_work(usb_work, &ui->work, 0);
	enable_irq(ui->irq);
}
+
+void usb_function_enable(const char *function, int enable)
+{
+	struct usb_function_info *fi;
+	struct usb_info *ui = the_usb_info;
+	unsigned long functions_mask;
+	int curr_enable;
+	unsigned short pid;
+	int i;
+
+	if (!ui)
+		return;
+
+	pr_info("%s: name = %s, enable = %d\n", __func__, function, enable);
+
+	fi = usb_find_function(function);
+	if (!fi) {
+		pr_err("%s: function (%s) not registered with DCD\n",
+							__func__, function);
+		return;
+	}
+	if (fi->enabled == enable) {
+		pr_err("%s: function (%s) state is same\n",
+						__func__, function);
+		return;
+	}
+	functions_mask = 0;
+	curr_enable = fi->enabled;
+	fi->enabled = enable;
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (fi && fi->enabled)
+			functions_mask |= (1 << i);
+	}
+
+	pid = usb_get_product_id(functions_mask);
+	if (!pid) {
+		fi->enabled = curr_enable;
+		pr_err("%s: mask (%lx) not matching with any products\n",
+						__func__, functions_mask);
+		pr_err("%s: continuing with current composition\n", __func__);
+		return;
+	}
+	usb_switch_composition(pid);
+}
+EXPORT_SYMBOL(usb_function_enable);
+
/*
 * usb_free() - release every resource acquired by probe and free the
 * usb_info structure itself.
 *
 * @ret is passed through unchanged so callers can tail-call this on
 * error paths.  Teardown order mirrors the reverse of acquisition:
 * IRQs, DMA pool and coherent buffer, function/descriptor arrays,
 * register mapping, clocks, suspend locks, then the structure.
 */
static int usb_free(struct usb_info *ui, int ret)
{
	disable_irq_wake(ui->irq);
	free_irq(ui->irq, ui);
	/* GPIO wakeup IRQs only exist when config_gpio was provided */
	if (ui->gpio_irq[0])
		free_irq(ui->gpio_irq[0], NULL);
	if (ui->gpio_irq[1])
		free_irq(ui->gpio_irq[1], NULL);

	dma_pool_destroy(ui->pool);
	dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
	kfree(ui->func);
	kfree(ui->strdesc);
	iounmap(ui->addr);
	clk_put(ui->clk);
	clk_put(ui->pclk);
	clk_put(ui->cclk);
	msm_hsusb_suspend_locks_init(ui, 0);
	kfree(ui);

	return ret;
}
+
+static int usb_vbus_is_on(struct usb_info *ui)
+{
+	unsigned tmp;
+
+	/* disable session valid raising and falling interrupts */
+	ulpi_write(ui, ULPI_SESSION_VALID_RAISE, ULPI_USBINTR_ENABLE_RASING_C);
+	ulpi_write(ui, ULPI_SESSION_VALID_FALL, ULPI_USBINTR_ENABLE_FALLING_C);
+
+	tmp = ulpi_read(ui, ULPI_USBINTR_STATUS);
+
+	/* enable session valid raising and falling interrupts */
+	ulpi_write(ui, ULPI_SESSION_VALID_RAISE, ULPI_USBINTR_ENABLE_RASING_S);
+	ulpi_write(ui, ULPI_SESSION_VALID_FALL, ULPI_USBINTR_ENABLE_FALLING_S);
+
+	if (tmp & (1 << 2))
+		return 1;
+	return 0;
+}
/*
 * usb_do_work() - state machine work item (IDLE / ONLINE / OFFLINE).
 *
 * Each loop iteration atomically snapshots and clears ui->flags under the
 * lock and feeds the flags to the handler for the current state; the loop
 * exits once no flags remain.  A flag that a state does not understand
 * falls through to the "reset" path, which returns to IDLE with
 * USB_FLAG_RESET set so the hardware is brought back to a known state on
 * the next pass.
 */
static void usb_do_work(struct work_struct *w)
{
	struct usb_info *ui = container_of(w, struct usb_info, work.work);
	unsigned long iflags;
	unsigned long flags, ret;

	for (;;) {
		/* take ownership of all pending flags atomically */
		spin_lock_irqsave(&ui->lock, iflags);
		flags = ui->flags;
		ui->flags = 0;
		spin_unlock_irqrestore(&ui->lock, iflags);

		/* give up if we have nothing to do */
		if (flags == 0)
			break;

		switch (ui->state) {
		case USB_STATE_IDLE:
			if (flags & USB_FLAG_REG_OTG) {
				/* register ourselves with the OTG driver */
				dcd_ops.handle = (void *) ui;
				ret = ui->xceiv->set_peripheral(ui->xceiv,
								&dcd_ops);
				if (ret)
					pr_err("%s: Can't register peripheral"
						"driver with OTG", __func__);
				break;
			}
			if ((flags & USB_FLAG_START) ||
					(flags & USB_FLAG_RESET)) {
				disable_irq(ui->irq);
				if (ui->vbus_sn_notif)
					msm_pm_app_enable_usb_ldo(1);
				usb_clk_enable(ui);
				usb_vreg_enable(ui);
				usb_vbus_online(ui);

				/* if VBUS is present move to ONLINE state
				 * otherwise move to OFFLINE state
				 */
				if (usb_vbus_is_on(ui)) {
					ui->usb_state = USB_STATE_POWERED;
					msm_hsusb_suspend_locks_acquire(ui, 1);
					ui->state = USB_STATE_ONLINE;
					usb_enable_pullup(ui);
					schedule_delayed_work(
							&ui->chg_legacy_det,
							USB_CHG_DET_DELAY);
					pr_info("hsusb: IDLE -> ONLINE\n");
				} else {
					ui->usb_state = USB_STATE_NOTATTACHED;
					ui->state = USB_STATE_OFFLINE;

					msleep(500);
					usb_lpm_enter(ui);
					pr_info("hsusb: IDLE -> OFFLINE\n");
					if (ui->vbus_sn_notif)
						msm_pm_app_enable_usb_ldo(0);
				}
				enable_irq(ui->irq);
				break;
			}
			goto reset;

		case USB_STATE_ONLINE:
			/* If at any point when we were online, we received
			 * the signal to go offline, we must honor it
			 */
			if (flags & USB_FLAG_VBUS_OFFLINE) {
				enum charger_type temp;
				unsigned long f;

				cancel_delayed_work_sync(&ui->chg_legacy_det);

				/* consume the detected charger type */
				spin_lock_irqsave(&ui->lock, f);
				temp = ui->chg_type;
				ui->chg_type = USB_CHG_TYPE__INVALID;
				spin_unlock_irqrestore(&ui->lock, f);

				if (temp != USB_CHG_TYPE__INVALID) {
					/* re-acquire wakelock and restore axi
					 * freq if they have been reduced by
					 * charger work item
					 */
					msm_hsusb_suspend_locks_acquire(ui, 1);

					msm_chg_usb_i_is_not_available();
					msm_chg_usb_charger_disconnected();
				}

				/* reset usb core and usb phy */
				disable_irq(ui->irq);
				if (ui->in_lpm)
					usb_lpm_exit(ui);
				usb_vbus_offline(ui);
				usb_lpm_enter(ui);
				if ((ui->vbus_sn_notif) &&
				(ui->usb_state == USB_STATE_NOTATTACHED))
					msm_pm_app_enable_usb_ldo(0);
				ui->state = USB_STATE_OFFLINE;
				enable_irq(ui->irq);
				switch_set_state(&ui->sdev, 0);
				pr_info("hsusb: ONLINE -> OFFLINE\n");
				break;
			}
			if (flags & USB_FLAG_SUSPEND) {
				ui->usb_state = USB_STATE_SUSPENDED;
				usb_lpm_enter(ui);
				msm_hsusb_suspend_locks_acquire(ui, 1);
				break;
			}
			if ((flags & USB_FLAG_RESUME) ||
					(flags & USB_FLAG_CONFIGURE)) {
				/* report configured current draw to charger */
				int maxpower = usb_get_max_power(ui);

				if (maxpower > 0)
					msm_chg_usb_i_is_available(maxpower);

				if (flags & USB_FLAG_CONFIGURE)
					switch_set_state(&ui->sdev, 1);

				break;
			}
			goto reset;

		case USB_STATE_OFFLINE:
			/* If we were signaled to go online and vbus is still
			 * present when we received the signal, go online.
			 */
			if ((flags & USB_FLAG_VBUS_ONLINE)) {
				msm_hsusb_suspend_locks_acquire(ui, 1);
				disable_irq(ui->irq);
				ui->state = USB_STATE_ONLINE;
				if (ui->in_lpm)
					usb_lpm_exit(ui);
				usb_vbus_online(ui);
				/* session gone again: ack the status change
				 * and fall back to a full reset */
				if (!(B_SESSION_VALID & readl(USB_OTGSC))) {
					writel(((readl(USB_OTGSC) &
						~OTGSC_INTR_STS_MASK) |
						OTGSC_BSVIS), USB_OTGSC);
					enable_irq(ui->irq);
					goto reset;
				}
				usb_enable_pullup(ui);
				schedule_delayed_work(
						&ui->chg_legacy_det,
						USB_CHG_DET_DELAY);
				pr_info("hsusb: OFFLINE -> ONLINE\n");
				enable_irq(ui->irq);
				break;
			}
			if (flags & USB_FLAG_SUSPEND) {
				usb_lpm_enter(ui);
				wake_unlock(&ui->wlock);
				break;
			}
			/* fall through: unhandled flag in OFFLINE */
		default:
reset:
			/* For RESET or any unknown flag in a particular state
			 * go to IDLE state and reset HW to bring to known state
			 */
			ui->flags = USB_FLAG_RESET;
			ui->state = USB_STATE_IDLE;
		}
	}
}
+
+void msm_hsusb_set_vbus_state(int online)
+{
+	struct usb_info *ui = the_usb_info;
+
+	if (ui && online) {
+		msm_pm_app_enable_usb_ldo(1);
+		usb_lpm_exit(ui);
+		/* Turn on PHY comparators */
+		if (!(ulpi_read(ui, 0x30) & 0x01))
+				ulpi_write(ui, 0x01, 0x30);
+	}
+}
+
/*
 * GPIO wakeup interrupt (VBUS/ID line activity while in low power mode).
 * The IRQ disables itself here to avoid a storm; it is re-enabled the
 * next time low power mode is entered (see usb_lpm_enter()).
 */
static irqreturn_t usb_lpm_gpio_isr(int irq, void *data)
{
	disable_irq(irq);

	return IRQ_HANDLED;
}
+
/*
 * usb_lpm_exit() - bring the controller out of low power mode.
 *
 * Restores GPIO configuration, takes a wakelock, ungates clocks and the
 * PHY regulator, then clears the async-interrupt and ULPI STP controls.
 * If the PHY is still clock-gated (PORTSC_PHCD set), the PHY wakeup is
 * deferred to the wakeup_phy work item with the controller IRQ masked;
 * in that case in_lpm is cleared by the work item, not here.
 */
static void usb_lpm_exit(struct usb_info *ui)
{
	if (ui->in_lpm == 0)
		return;

	if (usb_lpm_config_gpio)
		usb_lpm_config_gpio(0);

	wake_lock(&ui->wlock);
	usb_clk_enable(ui);
	usb_vreg_enable(ui);

	writel(readl(USB_USBCMD) & ~ASYNC_INTR_CTRL, USB_USBCMD);
	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);

	if (readl(USB_PORTSC) & PORTSC_PHCD) {
		/* PHY clock still gated: wake it from the work item */
		disable_irq(ui->irq);
		schedule_work(&ui->li.wakeup_phy);
	} else {
		ui->in_lpm = 0;
		if (ui->xceiv)
			ui->xceiv->set_suspend(ui->xceiv, 0);
	}
	pr_info("%s(): USB exited from low power mode\n", __func__);
}
+
/*
 * usb_lpm_enter() - put the controller and PHY into low power mode.
 *
 * Returns 0 on success (or if already in LPM), -1 when the procedure is
 * aborted: the device is online, the PHY refused to suspend, a session
 * became valid mid-suspend, or the GPIO reconfiguration failed.  On
 * abort the controller is left running and ui->flags may be set so the
 * state machine can recover on its next pass.
 */
static int usb_lpm_enter(struct usb_info *ui)
{
	unsigned long flags;
	unsigned connected;

	spin_lock_irqsave(&ui->lock, flags);
	if (ui->in_lpm) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_debug("already in lpm, nothing to do\n");
		return 0;
	}

	if (usb_is_online(ui)) {
		spin_unlock_irqrestore(&ui->lock, flags);
		pr_info("%s: lpm procedure aborted\n", __func__);
		return -1;
	}

	/* commit to LPM before dropping the lock */
	ui->in_lpm = 1;
	if (ui->xceiv)
		ui->xceiv->set_suspend(ui->xceiv, 1);
	disable_irq(ui->irq);
	spin_unlock_irqrestore(&ui->lock, flags);

	if (usb_suspend_phy(ui)) {
		/* back out and force a controller reset */
		ui->in_lpm = 0;
		ui->flags = USB_FLAG_RESET;
		enable_irq(ui->irq);
		pr_err("%s: phy suspend failed, lpm procedure aborted\n",
				__func__);
		return -1;
	}

	if ((B_SESSION_VALID & readl(USB_OTGSC)) &&
				(ui->usb_state == USB_STATE_NOTATTACHED)) {
		/* cable appeared while suspending: undo and hand the
		 * VBUS_ONLINE event to the state machine */
		ui->in_lpm = 0;
		writel(((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
						OTGSC_BSVIS), USB_OTGSC);
		ui->flags = USB_FLAG_VBUS_ONLINE;
		ui->usb_state = USB_STATE_POWERED;
		usb_wakeup_phy(ui);
		enable_irq(ui->irq);
		return -1;
	}

	/* enable async interrupt */
	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL, USB_USBCMD);
	connected = readl(USB_USBCMD) & USBCMD_RS;

	usb_vreg_disable(ui);
	usb_clk_disable(ui);

	if (usb_lpm_config_gpio) {
		if (usb_lpm_config_gpio(1)) {
			spin_lock_irqsave(&ui->lock, flags);
			usb_lpm_exit(ui);
			spin_unlock_irqrestore(&ui->lock, flags);
			enable_irq(ui->irq);
			return -1;
		}
		/* arm the GPIO wakeup IRQs (disabled again in their ISR) */
		enable_irq(ui->gpio_irq[0]);
		enable_irq(ui->gpio_irq[1]);
	}

	enable_irq(ui->irq);
	msm_hsusb_suspend_locks_acquire(ui, 0);
	pr_info("%s: usb in low power mode\n", __func__);
	return 0;
}
+
+static void usb_enable_pullup(struct usb_info *ui)
+{
+	disable_irq(ui->irq);
+	writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
+	writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+	enable_irq(ui->irq);
+}
+
+/* SW workarounds
+Issue #1	- USB Spoof Disconnect Failure
+Symptom	- Writing 0 to run/stop bit of USBCMD doesn't cause disconnect
+SW workaround	- Making opmode non-driving and SuspendM set in function
+		register of SMSC phy
+*/
+static void usb_disable_pullup(struct usb_info *ui)
+{
+	disable_irq(ui->irq);
+	writel(readl(USB_USBINTR) & ~(STS_URI | STS_SLI | STS_UI | STS_PCI),
+			USB_USBINTR);
+	writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+
+	/* S/W workaround, Issue#1 */
+	if (!is_phy_external() && !is_phy_45nm())
+		ulpi_write(ui, 0x48, 0x04);
+
+	enable_irq(ui->irq);
+}
+
+static void usb_chg_stop(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	enum charger_type temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	temp = ui->chg_type;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__SDP)
+		msm_chg_usb_i_is_not_available();
+}
+
/*
 * usb_vbus_online() - prepare the controller after VBUS appears.
 *
 * If in low power mode: restore GPIO config, enable the PHY regulator,
 * ungate clocks and wake the PHY — in that order — before clearing
 * in_lpm.  Always finishes with a controller reset.
 */
static void usb_vbus_online(struct usb_info *ui)
{
	if (ui->in_lpm) {
		if (usb_lpm_config_gpio)
			usb_lpm_config_gpio(0);
		usb_vreg_enable(ui);
		usb_clk_enable(ui);
		usb_wakeup_phy(ui);
		ui->in_lpm = 0;
	}

	usb_reset(ui);
}
+
/*
 * usb_vbus_offline() - handle cable disconnect.
 *
 * Flushes endpoints and drops the configuration if we were online, then
 * resets the PHY and the link controller (to work around h/w bugs and
 * release anything the hardware may still hold).  Finally disables the
 * VbusValid/SessionEnd comparators, and — when no OTG transceiver is
 * present — also the ID pull-up and related ULPI interrupt sources.
 */
static void usb_vbus_offline(struct usb_info *ui)
{
	unsigned long timeout;
	unsigned val = 0;

	if (ui->online != 0) {
		ui->online = 0;
		flush_all_endpoints(ui);
		set_configuration(ui, 0);
	}

	/* reset h/w at cable disconnetion becasuse
	 * of h/w bugs and to flush any resource that
	 * h/w might be holding
	 */
	clk_enable(ui->clk);

	/* PHY must be awake before it can be reset */
	if (readl(USB_PORTSC) & PORTSC_PHCD)
		usb_wakeup_phy(ui);

	if (ui->pdata->phy_reset)
		ui->pdata->phy_reset(ui->addr);
	else
		msm_hsusb_phy_reset();
	/* Give some delay to settle phy after reset */
	msleep(100);

	/* reset the link controller and poll for completion */
	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	while (readl(USB_USBCMD) & USBCMD_RESET) {
		if (time_after(jiffies, timeout)) {
			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
			break;
		}
		msleep(1);
	}

	/* Disable VbusValid and SessionEnd comparators */
	val = ULPI_VBUS_VALID | ULPI_SESS_END;

	/* enable id interrupt only when transceiver is available */
	if (ui->xceiv)
		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE, USB_OTGSC);
	else {
		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
							USB_OTGSC);
		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
	}
	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
	ulpi_write(ui, val, ULPI_INT_FALL_CLR);

	clk_disable(ui->clk);
}
+
/*
 * Work item: wake the PHY after a deferred LPM exit (scheduled from
 * usb_lpm_exit() when PORTSC_PHCD was still set).  On failure the
 * controller is reset via the work queue; on success in_lpm is cleared
 * and the transceiver resumed.  The controller IRQ was masked by
 * usb_lpm_exit() and is re-enabled here on both paths.
 */
static void usb_lpm_wakeup_phy(struct work_struct *w)
{
	struct usb_info *ui = the_usb_info;
	unsigned long flags;

	if (usb_wakeup_phy(ui)) {
		pr_err("fatal error: cannot bring phy out of lpm\n");
		pr_err("%s: resetting controller\n", __func__);

		spin_lock_irqsave(&ui->lock, flags);
		usb_disable_pullup(ui);
		ui->flags = USB_FLAG_RESET;
		queue_delayed_work(usb_work, &ui->work, 0);
		enable_irq(ui->irq);
		spin_unlock_irqrestore(&ui->lock, flags);
		return;
	}

	ui->in_lpm = 0;
	if (ui->xceiv)
		ui->xceiv->set_suspend(ui->xceiv, 0);
	enable_irq(ui->irq);
}
+
/*
 * usb_function_reenumerate() - force the host to re-enumerate us by
 * dropping the D+ pullup for 10ms and raising it again.
 */
void usb_function_reenumerate(void)
{
	struct usb_info *ui = the_usb_info;

	/* disable and re-enable the D+ pullup */
	pr_info("hsusb: disable pullup\n");
	usb_disable_pullup(ui);

	msleep(10);

	pr_info("hsusb: enable pullup\n");
	usb_enable_pullup(ui);
}
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
/*
 * Debugfs "status" read: snapshot controller registers, per-endpoint
 * queue heads and pending requests into the file-scope debug_buffer
 * under ui->lock, then copy the requested range to userspace.
 *
 * NOTE(review): debug_buffer is a single shared static buffer, so two
 * concurrent readers could race on its contents (each individual dump is
 * still built consistently under the spinlock) — confirm whether
 * concurrent opens matter here.
 */
static ssize_t debug_read_status(struct file *file, char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct usb_info *ui = file->private_data;
	char *buf = debug_buffer;
	unsigned long flags;
	struct usb_endpoint *ept;
	struct msm_request *req;
	int n;
	int i = 0;

	spin_lock_irqsave(&ui->lock, flags);

	i += scnprintf(buf + i, PAGE_SIZE - i,
		       "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
		       readl(USB_ENDPTSETUPSTAT),
		       readl(USB_ENDPTPRIME),
		       readl(USB_ENDPTSTAT),
		       readl(USB_ENDPTCOMPLETE));
	i += scnprintf(buf + i, PAGE_SIZE - i,
		       "regs:   cmd=%08x   sts=%08x intr=%08x port=%08x\n\n",
		       readl(USB_USBCMD),
		       readl(USB_USBSTS),
		       readl(USB_USBINTR),
		       readl(USB_PORTSC));


	/* dump every configured endpoint and its queued requests */
	for (n = 0; n < 32; n++) {
		ept = ui->ept + n;
		if (ept->max_pkt == 0)
			continue;

		i += scnprintf(buf + i, PAGE_SIZE - i,
			       "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
			       ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
			       ept->head->config, ept->head->active,
			       ept->head->next, ept->head->info);

		for (req = ept->req; req; req = req->next)
			i += scnprintf(buf + i, PAGE_SIZE - i,
				       "  req @%08x next=%08x info=%08x page0=%08x %c %c\n",
				       req->item_dma, req->item->next,
				       req->item->info, req->item->page0,
				       req->busy ? 'B' : ' ',
				       req->live ? 'L' : ' '
				);
	}

	i += scnprintf(buf + i, PAGE_SIZE - i,
		       "phy failure count: %d\n", ui->phy_fail_count);

	spin_unlock_irqrestore(&ui->lock, flags);

	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
}
+
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->flags |= USB_FLAG_RESET;
+	queue_delayed_work(usb_work, &ui->work, 0);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+
+
/* Debugfs "cycle" write: any write forces host re-enumeration. */
static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	usb_function_reenumerate();
	return count;
}
+
/* Stash the usb_info pointer (debugfs i_private) for the handlers. */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
+
/* Debugfs file operations for status/reset/cycle.
 * NOTE(review): these appear to be referenced only by usb_debugfs_init()
 * below and could likely be marked static const — confirm there are no
 * external users before changing linkage.
 */
const struct file_operations debug_stat_ops = {
	.open = debug_open,
	.read = debug_read_status,
};



const struct file_operations debug_reset_ops = {
	.open = debug_open,
	.write = debug_write_reset,
};

const struct file_operations debug_cycle_ops = {
	.open = debug_open,
	.write = debug_write_cycle,
};
+
+static struct dentry *debugfs_dent;
+static struct dentry *debugfs_status;
+static struct dentry *debugfs_reset;
+static struct dentry *debugfs_cycle;
+static void usb_debugfs_init(struct usb_info *ui)
+{
+	debugfs_dent = debugfs_create_dir("usb", 0);
+	if (IS_ERR(debugfs_dent))
+		return;
+
+	debugfs_status = debugfs_create_file("status", 0444,
+				debugfs_dent, ui, &debug_stat_ops);
+	debugfs_reset = debugfs_create_file("reset", 0222,
+				debugfs_dent, ui, &debug_reset_ops);
+	debugfs_cycle = debugfs_create_file("cycle", 0222,
+				debugfs_dent, ui, &debug_cycle_ops);
+}
+
+static void usb_debugfs_uninit(void)
+{
+	debugfs_remove(debugfs_status);
+	debugfs_remove(debugfs_reset);
+	debugfs_remove(debugfs_cycle);
+	debugfs_remove(debugfs_dent);
+}
+
#else
/* CONFIG_DEBUG_FS disabled: provide no-op stubs. */
static void usb_debugfs_init(struct usb_info *ui) {}
static void usb_debugfs_uninit(void) {}
#endif
+
/*
 * Fill in the device descriptor (VID/PID/bcdDevice and string descriptor
 * indices) from platform data and the active composition, then push the
 * serial number and product id to the modem (A9) over RPC — the serial
 * number is used there for software download.
 */
static void usb_configure_device_descriptor(struct usb_info *ui)
{
	desc_device.idVendor = ui->pdata->vendor_id;
	desc_device.idProduct = ui->composition->product_id;
	desc_device.bcdDevice = ui->pdata->version;

	if (ui->pdata->serial_number)
		desc_device.iSerialNumber =
			usb_msm_get_next_strdesc_id(ui->pdata->serial_number);
	if (ui->pdata->product_name)
		desc_device.iProduct =
			usb_msm_get_next_strdesc_id(ui->pdata->product_name);
	if (ui->pdata->manufacturer_name)
		desc_device.iManufacturer =
			usb_msm_get_next_strdesc_id(
				ui->pdata->manufacturer_name);

	/* Send Serial number to A9 for software download */
	if (ui->pdata->serial_number) {
		msm_hsusb_is_serial_num_null(FALSE);
		msm_hsusb_send_serial_number(ui->pdata->serial_number);
	} else
		msm_hsusb_is_serial_num_null(TRUE);

	msm_hsusb_send_productID(desc_device.idProduct);

}
+static ssize_t msm_hsusb_store_func_enable(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	char name[20];
+	int enable = 0;
+	int i;
+
+	for (i = 0; buf[i] != 0; i++) {
+		if (buf[i] == '=')
+			break;
+		name[i] = buf[i];
+	}
+	name[i++] = 0;
+	if (buf[i] == '0' || buf[i] == '1')
+		enable = buf[i] - '0';
+	else
+		return size;
+
+	pr_info("%s: name = %s, enable = %d\n", __func__, name, enable);
+	usb_function_enable(name, enable);
+	return size;
+}
+static ssize_t msm_hsusb_show_compswitch(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (ui->composition)
+		i = scnprintf(buf, PAGE_SIZE,
+				"composition product id = %x\n",
+					ui->composition->product_id);
+	else
+		i = scnprintf(buf, PAGE_SIZE,
+				"composition product id = 0\n");
+	return i;
+}
+
+static ssize_t msm_hsusb_store_compswitch(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	unsigned long pid;
+
+	if (!strict_strtoul(buf, 16, &pid)) {
+		pr_info("%s: Requested New Product id = %lx\n", __func__, pid);
+		usb_switch_composition((unsigned short)pid);
+	} else
+		pr_info("%s: strict_strtoul conversion failed\n", __func__);
+
+	return size;
+}
/* sysfs store: any write asks the host for remote wakeup. */
static ssize_t msm_hsusb_store_autoresume(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t size)
{
	usb_remote_wakeup();

	return size;
}
+
+static ssize_t msm_hsusb_show_state(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED",
+			"USB_STATE_POWERED", "USB_STATE_UNAUTHENTICATED",
+			"USB_STATE_RECONNECTING", "USB_STATE_DEFAULT",
+			"USB_STATE_ADDRESS", "USB_STATE_CONFIGURED",
+			"USB_STATE_SUSPENDED"
+	};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", state[ui->usb_state]);
+	return i;
+}
+
+static ssize_t msm_hsusb_show_lpm(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	i = scnprintf(buf, PAGE_SIZE, "%d\n", ui->in_lpm);
+	return i;
+}
+
+static ssize_t msm_hsusb_show_speed(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW",
+			"USB_SPEED_FULL", "USB_SPEED_HIGH"};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->speed]);
+	return i;
+}
+
/* Device-level sysfs attributes:
 *   composition - show/set the active composition by product id (hex)
 *   func_enable - write "name=0|1" to toggle a function (write-only)
 *   autoresume  - any write requests remote wakeup (write-only)
 *   state/lpm/speed - read-only status reporting
 */
static DEVICE_ATTR(composition, 0664,
		msm_hsusb_show_compswitch, msm_hsusb_store_compswitch);
static DEVICE_ATTR(func_enable, S_IWUSR,
		NULL, msm_hsusb_store_func_enable);
static DEVICE_ATTR(autoresume, 0222,
		NULL, msm_hsusb_store_autoresume);
static DEVICE_ATTR(state, 0664, msm_hsusb_show_state, NULL);
static DEVICE_ATTR(lpm, 0664, msm_hsusb_show_lpm, NULL);
static DEVICE_ATTR(speed, 0664, msm_hsusb_show_speed, NULL);

static struct attribute *msm_hsusb_attrs[] = {
	&dev_attr_composition.attr,
	&dev_attr_func_enable.attr,
	&dev_attr_autoresume.attr,
	&dev_attr_state.attr,
	&dev_attr_lpm.attr,
	&dev_attr_speed.attr,
	NULL,
};
static struct attribute_group msm_hsusb_attr_grp = {
	.attrs = msm_hsusb_attrs,
};
+
/* Generate one read-only sysfs attribute per function slot, reporting
 * whether the function at @index is enabled in the current composition.
 * NOTE(review): the slot mapping below (diag=0 ... rmnet=6) must match
 * the platform data's function ordering — confirm against the board
 * file before adding or reordering entries.
 */
#define msm_hsusb_func_attr(function, index)				\
static ssize_t  show_##function(struct device *dev,			\
		struct device_attribute *attr, char *buf)		\
{									\
	struct usb_info *ui = the_usb_info;				\
	struct usb_function_info *fi = ui->func[index];			\
									\
	return sprintf(buf, "%d", fi->enabled);				\
									\
}									\
									\
static DEVICE_ATTR(function, S_IRUGO, show_##function, NULL);

msm_hsusb_func_attr(diag, 0);
msm_hsusb_func_attr(adb, 1);
msm_hsusb_func_attr(modem, 2);
msm_hsusb_func_attr(nmea, 3);
msm_hsusb_func_attr(mass_storage, 4);
msm_hsusb_func_attr(ethernet, 5);
msm_hsusb_func_attr(rmnet, 6);

static struct attribute *msm_hsusb_func_attrs[] = {
	&dev_attr_diag.attr,
	&dev_attr_adb.attr,
	&dev_attr_modem.attr,
	&dev_attr_nmea.attr,
	&dev_attr_mass_storage.attr,
	&dev_attr_ethernet.attr,
	&dev_attr_rmnet.attr,
	NULL,
};

/* Exposed under the "functions/" subdirectory of the device. */
static struct attribute_group msm_hsusb_func_attr_grp = {
	.name  = "functions",
	.attrs = msm_hsusb_func_attrs,
};
+
+static int __init usb_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct usb_info *ui;
+	int irq;
+	int ulpi_irq1 = 0;
+	int ulpi_irq2 = 0;
+	int i;
+	int ret = 0;
+
+	if (!pdev || !pdev->dev.platform_data) {
+		pr_err("%s:pdev or platform data is null\n", __func__);
+		return -ENODEV;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		pr_err("%s: failed to get irq num from platform_get_irq\n",
+				__func__);
+		return -ENODEV;
+	}
+
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("%s: failed to get mem resource\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_attr_grp);
+	if (ret) {
+		pr_err("%s: unable to create sysfs group\n", __func__);
+		return ret;
+	}
+
+	usb_work = create_singlethread_workqueue("usb_work");
+	if (!usb_work) {
+		pr_err("%s: unable to create work queue\n", __func__);
+		ret = -ENOMEM;
+		goto free_sysfs_grp;
+	}
+
+	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
+	if (!ui) {
+		pr_err("%s: unable to allocate memory for ui\n", __func__);
+		ret = -ENOMEM;
+		goto free_workqueue;
+	}
+
+	ui->pdev = pdev;
+	ui->pdata = pdev->dev.platform_data;
+
+	for (i = 0; i < ui->pdata->num_compositions; i++)
+		if (ui->pdata->compositions[i].product_id == pid) {
+			ui->composition = &ui->pdata->compositions[i];
+			break;
+		}
+	if (!ui->composition) {
+		pr_err("%s: unable to find the composition with pid:(%d)\n",
+				__func__, pid);
+		ret = -ENODEV;
+		goto free_ui;
+	}
+
+	ui->phy_info = ui->pdata->phy_info;
+	if (ui->phy_info == USB_PHY_UNDEFINED) {
+		pr_err("undefined phy_info: (%d)\n", ui->phy_info);
+		ret = -ENOMEM;
+		goto free_ui;
+	}
+
+	/* zero is reserved for language id */
+	ui->strdesc_index = 1;
+	ui->strdesc = kzalloc(sizeof(char *) * MAX_STRDESC_NUM, GFP_KERNEL);
+	if (!ui->strdesc) {
+		pr_err("%s: unable allocate mem for string descriptors\n",
+				__func__);
+		ret = -ENOMEM;
+		goto free_ui;
+	}
+
+	ui->num_funcs = ui->pdata->num_functions;
+	ui->func = kzalloc(sizeof(struct usb_function *) * ui->num_funcs,
+				GFP_KERNEL);
+	if (!ui->func) {
+		pr_err("%s: unable allocate mem for functions\n", __func__);
+		ret = -ENOMEM;
+		goto free_str_desc;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+	if (ret) {
+		pr_err("%s: unable to create functions sysfs group\n",
+				__func__);
+		goto free_func;
+	}
+
+	ui->addr = ioremap(res->start, resource_size(res));
+	if (!ui->addr) {
+		pr_err("%s: unable ioremap\n", __func__);
+		ret = -ENOMEM;
+		goto free_func_sysfs_grp;
+	}
+
+	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
+	if (!ui->buf) {
+		pr_err("%s: failed allocate dma coherent memory\n", __func__);
+		ret = -ENOMEM;
+		goto free_iounmap;
+	}
+
+	ui->pool = dma_pool_create("hsusb", NULL, 32, 32, 0);
+	if (!ui->pool) {
+		pr_err("%s: unable to allocate dma pool\n", __func__);
+		ret = -ENOMEM;
+		goto free_dma_coherent;
+	}
+
+	ui->clk = clk_get(&pdev->dev, "usb_hs_clk");
+	if (IS_ERR(ui->clk)) {
+		pr_err("%s: unable get usb_hs_clk\n", __func__);
+		ret = PTR_ERR(ui->clk);
+		goto free_dma_pool;
+	}
+
+	ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
+	if (IS_ERR(ui->pclk)) {
+		pr_err("%s: unable get usb_hs_pclk\n", __func__);
+		ret = PTR_ERR(ui->pclk);
+		goto free_hs_clk;
+	}
+
+	if (ui->pdata->core_clk) {
+		ui->cclk = clk_get(&pdev->dev, "usb_hs_core_clk");
+		if (IS_ERR(ui->cclk)) {
+			pr_err("%s: unable get usb_hs_core_clk\n", __func__);
+			ret = PTR_ERR(ui->cclk);
+			goto free_hs_pclk;
+		}
+	}
+
+	if (ui->pdata->vreg5v_required) {
+		ui->vreg = vreg_get(NULL, "boost");
+		if (IS_ERR(ui->vreg)) {
+			pr_err("%s: vreg get failed\n", __func__);
+			ui->vreg = NULL;
+			ret = PTR_ERR(ui->vreg);
+			goto free_hs_cclk;
+		}
+	}
+
+	/* disable interrupts before requesting irq */
+	usb_clk_enable(ui);
+	writel(0, USB_USBINTR);
+	writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC);
+	usb_clk_disable(ui);
+
+	ret = request_irq(irq, usb_interrupt, IRQF_SHARED, pdev->name, ui);
+	if (ret) {
+		pr_err("%s: request_irq failed\n", __func__);
+		goto free_vreg5v;
+	}
+	ui->irq = irq;
+
+	if (ui->pdata->config_gpio) {
+		usb_lpm_config_gpio = ui->pdata->config_gpio;
+
+		ulpi_irq1 = platform_get_irq_byname(pdev, "vbus_interrupt");
+		if (ulpi_irq1 < 0) {
+			pr_err("%s: failed to get vbus gpio interrupt\n",
+					__func__);
+			return -ENODEV;
+		}
+
+		ulpi_irq2 = platform_get_irq_byname(pdev, "id_interrupt");
+		if (ulpi_irq2 < 0) {
+			pr_err("%s: failed to get id gpio interrupt\n",
+					__func__);
+			return -ENODEV;
+		}
+
+		ret = request_irq(ulpi_irq1,
+				&usb_lpm_gpio_isr,
+				IRQF_TRIGGER_HIGH,
+				"vbus_interrupt", NULL);
+		if (ret) {
+			pr_err("%s: failed to request vbus interrupt:(%d)\n",
+					__func__, ulpi_irq1);
+			goto free_irq;
+		}
+
+		ret = request_irq(ulpi_irq2,
+				&usb_lpm_gpio_isr,
+				IRQF_TRIGGER_RISING,
+				"usb_ulpi_data3", NULL);
+		if (ret) {
+			pr_err("%s: failed to request irq ulpi_data_3:(%d)\n",
+							__func__, ulpi_irq2);
+			goto free_ulpi_irq1;
+		}
+
+		ui->gpio_irq[0] = ulpi_irq1;
+		ui->gpio_irq[1] = ulpi_irq2;
+	}
+
+	ui->sdev.name = DRIVER_NAME;
+	ui->sdev.print_name = print_switch_name;
+	ui->sdev.print_state = print_switch_state;
+
+	ret = switch_dev_register(&ui->sdev);
+	if (ret < 0) {
+		pr_err("%s(): switch_dev_register failed ret = %d\n",
+				__func__, ret);
+		goto free_ulpi_irq2;
+	}
+
+	the_usb_info = ui;
+	ui->functions_map = ui->pdata->function_map;
+	ui->selfpowered = 0;
+	ui->remote_wakeup = 0;
+	ui->maxpower = 0xFA;
+	ui->chg_type = USB_CHG_TYPE__INVALID;
+	/* to allow swfi latency, driver latency
+	 * must be above listed swfi latency
+	 */
+	ui->pdata->swfi_latency += 1;
+
+	spin_lock_init(&ui->lock);
+	msm_hsusb_suspend_locks_init(ui, 1);
+	enable_irq_wake(irq);
+
+	/* memory barrier initialization in non-interrupt context */
+	dmb();
+
+	usb_debugfs_init(ui);
+	usb_prepare(ui);
+
+	pr_info("%s: io=%p, irq=%d, dma=%p(%x)\n",
+			__func__, ui->addr, ui->irq, ui->buf, ui->dma);
+	return 0;
+
+free_ulpi_irq2:
+	free_irq(ulpi_irq2, NULL);
+free_ulpi_irq1:
+	free_irq(ulpi_irq1, NULL);
+free_irq:
+	free_irq(ui->irq, ui);
+free_vreg5v:
+	if (ui->pdata->vreg5v_required)
+		vreg_put(ui->vreg);
+free_hs_cclk:
+	clk_put(ui->cclk);
+free_hs_pclk:
+	clk_put(ui->pclk);
+free_hs_clk:
+	clk_put(ui->clk);
+free_dma_pool:
+	dma_pool_destroy(ui->pool);
+free_dma_coherent:
+	dma_free_coherent(&pdev->dev, 4096, ui->buf, ui->dma);
+free_iounmap:
+	iounmap(ui->addr);
+free_func_sysfs_grp:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+free_func:
+	kfree(ui->func);
+free_str_desc:
+	kfree(ui->strdesc);
+free_ui:
+	kfree(ui);
+free_workqueue:
+	destroy_workqueue(usb_work);
+free_sysfs_grp:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_attr_grp);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+/*
+ * usb_platform_suspend() - platform PM suspend hook.
+ *
+ * Puts the controller into low power mode via usb_lpm_enter() unless
+ * peripheral mode is inactive or the controller is already in LPM, in
+ * which case there is nothing to do.  Returns 0 on success or when no
+ * action is needed, otherwise the usb_lpm_enter() error code.
+ */
+static int usb_platform_suspend(struct platform_device *pdev,
+		pm_message_t state)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (!ui->active) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		/* fixed: the two adjacent string literals previously ran
+		 * together as "...activenothing..." in the log */
+		pr_info("%s: peripheral mode is not active, "
+				"nothing to be done\n", __func__);
+		return 0;
+	}
+
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: we are already in lpm, nothing to be done\n",
+					__func__);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	ret = usb_lpm_enter(ui);
+	if (ret)
+		pr_err("%s: failed to enter lpm\n", __func__);
+
+	return ret;
+}
+#endif
+
+/* Platform driver glue for the MSM high-speed USB peripheral
+ * controller; the suspend hook is compiled in only with CONFIG_PM. */
+static struct platform_driver usb_driver = {
+	.probe = usb_probe,
+#ifdef CONFIG_PM
+	.suspend = usb_platform_suspend,
+#endif
+	.driver = { .name = DRIVER_NAME, },
+};
+
+/*
+ * Module entry point: bring up the RPC links used for PHY reset and
+ * charger notifications, then register the platform driver.
+ */
+static int __init usb_module_init(void)
+{
+	/* rpc connect for phy_reset */
+	msm_hsusb_rpc_connect();
+	/* rpc connect for charging */
+	msm_chg_rpc_connect();
+	return platform_driver_register(&usb_driver);
+}
+
+/*
+ * free_usb_info() - release all driver state hung off the_usb_info.
+ *
+ * Frees the per-function records, string descriptors and ep0 setup
+ * request, flushes endpoints if they were ever bound to this
+ * controller instance, gates the clocks/regulator under the lock, and
+ * finally frees the usb_info itself via usb_free().  Clears
+ * the_usb_info so later callers see the teardown.
+ */
+static void free_usb_info(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	int i;
+	if (ui) {
+		INIT_LIST_HEAD(&usb_function_list);
+
+		for (i = 0; i < ui->num_funcs; i++)
+			kfree(ui->func[i]);
+		ui->num_funcs = 0;
+		usb_uninit(ui);
+		kfree(ui->strdesc);
+		usb_ept_free_req(&ui->ep0in, ui->setup_req);
+		/* ept[0].ui is only set once endpoints were initialized
+		 * for this instance */
+		if (ui->ept[0].ui == ui)
+			flush_all_endpoints(ui);
+		spin_lock_irqsave(&ui->lock, flags);
+		usb_clk_disable(ui);
+		usb_vreg_disable(ui);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		/* NOTE(review): usb_free() is presumed to release "ui"
+		 * itself (the probe error path kfrees it) — ui must not
+		 * be touched after this call */
+		usb_free(ui, 0);
+		the_usb_info = NULL;
+	}
+}
+/*
+ * usb_exit() - full driver teardown on module unload.
+ *
+ * Detaches from the OTG transceiver, stops deferred work, and then
+ * unregisters everything that references "ui" BEFORE free_usb_info()
+ * releases the usb_info structure.  The original code called
+ * switch_dev_unregister()/sysfs_remove_group() on ui->sdev/ui->pdev
+ * after free_usb_info(), which is a use-after-free.
+ */
+static void usb_exit(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	/* free the dev state structure */
+	if (!ui)
+		return;
+
+	if (ui->xceiv) {
+		ui->xceiv->set_peripheral(ui->xceiv, NULL);
+		msm_otg_put_transceiver(ui->xceiv);
+	}
+
+	cancel_work_sync(&ui->li.wakeup_phy);
+
+	destroy_workqueue(usb_work);
+
+	/* Unregister users of ui->sdev / ui->pdev while ui is still
+	 * valid; free_usb_info() frees it and clears the_usb_info. */
+	switch_dev_unregister(&ui->sdev);
+	sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+	sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_attr_grp);
+
+	/* free the usb_info structure */
+	free_usb_info();
+
+	usb_debugfs_uninit();
+	platform_driver_unregister(&usb_driver);
+	msm_hsusb_rpc_close();
+	msm_chg_rpc_close();
+	msm_pm_app_unregister_vbus_sn(&msm_hsusb_set_vbus_state);
+	msm_pm_app_rpc_deinit();
+}
+
+/* Module exit point: delegates all cleanup to usb_exit(). */
+static void __exit usb_module_exit(void)
+{
+	usb_exit();
+}
+
+module_param(pid, int, 0);
+MODULE_PARM_DESC(pid, "Product ID of the desired composition");
+
+module_init(usb_module_init);
+module_exit(usb_module_exit);
+
+/*
+ * copy_string_descriptor() - build a USB string descriptor from an
+ * ASCII string.
+ *
+ * Encodes @string into @buffer as UTF-16LE, preceded by the standard
+ * bLength / bDescriptorType header.  The source length is clamped to
+ * 126 characters so that bLength (2 * len + 2) always fits in the
+ * single descriptor length byte (max 254); the original code let the
+ * byte wrap for longer strings.  Does nothing if @string is NULL.
+ */
+static void copy_string_descriptor(char *string, char *buffer)
+{
+	int length, i;
+
+	if (string) {
+		length = strlen(string);
+		if (length > 126)
+			length = 126;
+		buffer[0] = 2 * length + 2;
+		buffer[1] = USB_DT_STRING;
+		for (i = 0; i < length; i++) {
+			/* ASCII -> UTF-16LE: low byte is the character,
+			 * high byte is zero */
+			buffer[2 * i + 2] = string[i];
+			buffer[2 * i + 3] = 0;
+		}
+	}
+}
+/*
+ * get_qualifier_descriptor() - fill in a device_qualifier descriptor
+ * describing the device's other-speed (full-speed) capabilities.
+ *
+ * Returns the descriptor size in bytes.  The original version chained
+ * the first assignments with comma operators instead of semicolons;
+ * harmless but misleading, fixed here.  The redundant local alias of
+ * the parameter is dropped as well.
+ */
+static int get_qualifier_descriptor(struct usb_qualifier_descriptor *dq)
+{
+	dq->bLength = sizeof(struct usb_qualifier_descriptor);
+	dq->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+	dq->bcdUSB = __constant_cpu_to_le16(0x0200);
+	dq->bDeviceClass = USB_CLASS_PER_INTERFACE;
+	dq->bDeviceSubClass = 0;
+	dq->bDeviceProtocol = 0;
+	dq->bMaxPacketSize0 = 64;
+	dq->bNumConfigurations = 1;
+	dq->bRESERVED = 0;
+	return sizeof(struct usb_qualifier_descriptor);
+}
+
+/*
+ * usb_fill_descriptors() - pack a NULL-terminated array of USB
+ * descriptors into a contiguous buffer.
+ *
+ * Each descriptor is copied bLength bytes at a time.  Returns the
+ * total number of bytes written to @ptr.
+ */
+static int usb_fill_descriptors(void *ptr,
+		struct usb_descriptor_header **descriptors)
+{
+	unsigned char *dst = ptr;
+	unsigned idx;
+
+	for (idx = 0; descriptors[idx] != NULL; idx++) {
+		struct usb_descriptor_header *src = descriptors[idx];
+
+		memcpy(dst, src, src->bLength);
+		dst += src->bLength;
+	}
+
+	return dst - (u8 *)ptr;
+}
+
+/*
+ * usb_find_descriptor() - answer a GET_DESCRIPTOR control request.
+ *
+ * Decodes the descriptor type (high byte of wValue) and index (low
+ * byte) and fills req->buf / req->length with the matching device,
+ * device-qualifier, configuration, other-speed-configuration or
+ * string descriptor.  Returns 0 on success, -1 if the descriptor is
+ * unknown or cannot be produced.
+ *
+ * NOTE(review): the OTHER_SPEED_CONFIG case jumps with "goto
+ * get_config" into the body of the USB_DT_CONFIG "if" block; legal C,
+ * but cfg/ifc_count/ptr/start are then used from the jumped-into
+ * scope — keep that in mind before restructuring.
+ */
+static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl,
+				struct usb_request *req)
+{
+	int i;
+	unsigned short id = ctl->wValue;
+	unsigned short type = id >> 8;
+	id &= 0xff;
+
+	if ((type == USB_DT_DEVICE) && (id == 0)) {
+		req->length = sizeof(desc_device);
+		/* IAD compositions advertise the Misc/Interface-
+		 * Association device class triple (EF/02/01) */
+		if (usb_msm_is_iad()) {
+			desc_device.bDeviceClass = 0xEF;
+			desc_device.bDeviceSubClass = 0x02;
+			desc_device.bDeviceProtocol = 0x01;
+		}
+		memcpy(req->buf, &desc_device, req->length);
+		return 0;
+	}
+	if ((type == USB_DT_DEVICE_QUALIFIER) && (id == 0)) {
+		struct usb_qualifier_descriptor dq;
+		req->length = get_qualifier_descriptor(&dq);
+		if (usb_msm_is_iad()) {
+			dq.bDeviceClass = 0xEF;
+			dq.bDeviceSubClass = 0x02;
+			dq.bDeviceProtocol = 0x01;
+		}
+		memcpy(req->buf, &dq, req->length);
+		return 0;
+	}
+
+	if ((type == USB_DT_OTHER_SPEED_CONFIG) && (id == 0))
+		goto get_config;
+
+	if ((type == USB_DT_CONFIG) && (id == 0)) {
+		struct usb_config_descriptor cfg;
+		unsigned ifc_count = 0;
+		char *ptr, *start;
+get_config:
+		ifc_count = 0;
+		start = req->buf;
+		/* leave room for the config header; it is written last,
+		 * once wTotalLength is known */
+		ptr = start + USB_DT_CONFIG_SIZE;
+		ifc_count = ui->next_ifc_num;
+
+		/* append each enabled function's interface/endpoint
+		 * descriptors for the requested speed */
+		for (i = 0; i < ui->num_funcs; i++) {
+			struct usb_function_info *fi = ui->func[i];
+			struct usb_descriptor_header **dh = NULL;
+
+			if (!fi || !(ui->composition->functions & (1 << i)))
+				continue;
+			switch (ui->speed) {
+			case USB_SPEED_HIGH:
+				if (type == USB_DT_OTHER_SPEED_CONFIG)
+					dh = fi->func->fs_descriptors;
+				else
+					dh = fi->func->hs_descriptors;
+				break;
+
+			case USB_SPEED_FULL:
+				if (type == USB_DT_OTHER_SPEED_CONFIG)
+					dh = fi->func->hs_descriptors;
+				else
+					dh = fi->func->fs_descriptors;
+				break;
+
+			default:
+				printk(KERN_ERR "Unsupported speed(%x)\n",
+						ui->speed);
+				return -1;
+			}
+			ptr += usb_fill_descriptors(ptr, dh);
+		}
+
+#define	USB_REMOTE_WAKEUP_SUPPORT	1
+		cfg.bLength = USB_DT_CONFIG_SIZE;
+		if (type == USB_DT_OTHER_SPEED_CONFIG)
+			cfg.bDescriptorType =  USB_DT_OTHER_SPEED_CONFIG;
+		else
+			cfg.bDescriptorType = USB_DT_CONFIG;
+		cfg.wTotalLength = ptr - start;
+		cfg.bNumInterfaces = ifc_count;
+		cfg.bConfigurationValue = 1;
+		cfg.iConfiguration = 0;
+		cfg.bmAttributes = USB_CONFIG_ATT_ONE |
+			ui->selfpowered << USB_CONFIG_ATT_SELFPOWER_POS |
+			USB_REMOTE_WAKEUP_SUPPORT << USB_CONFIG_ATT_WAKEUP_POS;
+		cfg.bMaxPower = ui->maxpower;
+
+		memcpy(start, &cfg, USB_DT_CONFIG_SIZE);
+
+		req->length = ptr - start;
+		return 0;
+	}
+
+	if (type == USB_DT_STRING) {
+		char *buffer = req->buf;
+
+		buffer[0] = 0;
+		if (id > ui->strdesc_index)
+			return -1;
+		 if (id == STRING_LANGUAGE_ID)
+			memcpy(buffer, str_lang_desc, str_lang_desc[0]);
+		 else
+			copy_string_descriptor(ui->strdesc[id], buffer);
+
+		/* buffer[0] (bLength) stays 0 if nothing was copied */
+		if (buffer[0]) {
+			req->length = buffer[0];
+			return 0;
+		} else
+			return -1;
+	}
+	return -1;
+}
+
+/*****Gadget Framework Functions***/
+/*
+ * usb_get_device() - return the controller's struct device, or NULL
+ * if the driver has not been probed yet.
+ */
+struct device *usb_get_device(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	if (!ui || !ui->pdev)
+		return NULL;
+	return &ui->pdev->dev;
+}
+EXPORT_SYMBOL(usb_get_device);
+
+/*
+ * usb_ept_cancel_xfer() - remove a queued request from an endpoint.
+ *
+ * Unlinks @_req from @ept's software queue (head or middle).  If the
+ * hardware was working on the request (req->live), the endpoint is
+ * flushed, the request's TD is terminated and unmapped, and any
+ * remaining queued requests are reprimed with usb_ept_start().
+ * Returns 0 on success, -EINVAL on bad arguments or if the request is
+ * not found busy on the endpoint.  Runs entirely under ui->lock.
+ */
+int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	struct usb_info 	*ui = the_usb_info;
+	struct msm_request      *req = to_msm_request(_req);
+	struct msm_request 	*temp_req, *prev_req;
+	unsigned long		flags;
+
+	if (!(ui && req && ept->req))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (req->busy) {
+		req->req.status = 0;
+		req->busy = 0;
+
+		/* See if the request is the first request in the ept queue */
+		if (ept->req == req) {
+			/* Stop the transfer */
+			do {
+				writel((1 << ept->bit), USB_ENDPTFLUSH);
+				while (readl(USB_ENDPTFLUSH) & (1 << ept->bit))
+					udelay(100);
+			} while (readl(USB_ENDPTSTAT) & (1 << ept->bit));
+			if (!req->next)
+				ept->last = NULL;
+			ept->req = req->next;
+			ept->head->next = req->item->next;
+			goto cancel_req;
+		}
+		/* Request could be in the middle of ept queue */
+		prev_req = temp_req = ept->req;
+		do {
+			if (req == temp_req) {
+				if (req->live) {
+					/* Stop the transfer */
+					do {
+						writel((1 << ept->bit),
+							USB_ENDPTFLUSH);
+						while (readl(USB_ENDPTFLUSH) &
+							(1 << ept->bit))
+							udelay(100);
+					} while (readl(USB_ENDPTSTAT) &
+						(1 << ept->bit));
+				}
+				prev_req->next = temp_req->next;
+				prev_req->item->next = temp_req->item->next;
+				if (!req->next)
+					ept->last = prev_req;
+				goto cancel_req;
+			}
+			prev_req = temp_req;
+			temp_req = temp_req->next;
+		} while (temp_req != NULL);
+		goto error;
+/* NOTE(review): this label and its block live inside the
+ * "if (req->busy)" branch; the flat indentation is historical, the
+ * closing brace below "return 0" ends that branch. */
+cancel_req:
+	if (req->live) {
+		/* prepare the transaction descriptor item for the hardware */
+		req->item->next = TERMINATE;
+		req->item->info = 0;
+		req->live = 0;
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		/* Reprime the endpoint for the remaining transfers */
+		if (ept->req) {
+			temp_req = ept->req;
+			while (temp_req != NULL) {
+				temp_req->live = 0;
+				temp_req = temp_req->next;
+			}
+			usb_ept_start(ept);
+		}
+	} else
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+	}
+error:
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(usb_ept_cancel_xfer);
+
+/*
+ * usb_ept_set_halt() - stall an endpoint.
+ *
+ * Sets the direction-appropriate stall bit (TXS for IN, RXS for OUT)
+ * in the endpoint control register and records the halted state.
+ * Returns 0 on success, -1 if the controller is in LPM (registers are
+ * not accessible then).
+ */
+int usb_ept_set_halt(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned ctrl;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return -1;
+	}
+
+	ept->ept_halted = 1;
+
+	ctrl = readl(USB_ENDPTCTRL(ept->num));
+	if (ept->flags & EPT_FLAG_IN)
+		ctrl |= CTRL_TXS;
+	else
+		ctrl |= CTRL_RXS;
+	writel(ctrl, USB_ENDPTCTRL(ept->num));
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_set_halt);
+
+/*
+ * usb_ept_clear_halt() - clear an endpoint stall.
+ *
+ * Clears the direction-appropriate stall bit and resets the data
+ * toggle (TXR/RXR) so the next transfer starts at DATA0.  Returns 0
+ * on success, -1 if the controller is in LPM.
+ */
+int usb_ept_clear_halt(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned ctrl;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return -1;
+	}
+
+	ept->ept_halted = 0;
+
+	ctrl = readl(USB_ENDPTCTRL(ept->num));
+	if (ept->flags & EPT_FLAG_IN)
+		ctrl = (ctrl & ~CTRL_TXS) | CTRL_TXR;
+	else
+		ctrl = (ctrl & ~CTRL_RXS) | CTRL_RXR;
+	writel(ctrl, USB_ENDPTCTRL(ept->num));
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_clear_halt);
+
+/*
+ * usb_ept_is_stalled() - report whether an endpoint is stalled.
+ *
+ * Checks only the stall bit that matches the endpoint's direction:
+ * TXS for IN endpoints, RXS for OUT endpoints.  The original code
+ * fell through to the RXS test when an IN endpoint was not stalled,
+ * so an IN endpoint could be falsely reported as stalled whenever the
+ * paired OUT direction was.
+ */
+int usb_ept_is_stalled(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in)
+		return (n & CTRL_TXS) ? 1 : 0;
+	return (n & CTRL_RXS) ? 1 : 0;
+}
+
+/* Discard any data buffered in the endpoint's hardware FIFO. */
+void usb_ept_fifo_flush(struct usb_endpoint *ept)
+{
+	flush_endpoint(ept);
+}
+EXPORT_SYMBOL(usb_ept_fifo_flush);
+
+/* Stub: endpoint-to-function lookup is not implemented; always NULL. */
+struct usb_function *usb_ept_get_function(struct usb_endpoint *ept)
+{
+	return NULL;
+}
+EXPORT_SYMBOL(usb_ept_get_function);
+
+
+/*
+ * usb_free_endpoint_all_req() - free every request queued on an
+ * endpoint.
+ *
+ * Walks the singly linked request list, clearing each request's busy
+ * flag before freeing it.  The original guarded the free with
+ * "if (&req->req)", the address of an embedded member, which is
+ * always true — the check is removed.
+ */
+void usb_free_endpoint_all_req(struct usb_endpoint *ep)
+{
+	struct msm_request *next;
+	struct msm_request *req;
+
+	if (!ep)
+		return;
+
+	req = ep->req;
+	while (req) {
+		/* grab the link before the request is freed */
+		next = req->next;
+		req->busy = 0;
+		usb_ept_free_req(ep, &req->req);
+		req = next;
+	}
+}
+EXPORT_SYMBOL(usb_free_endpoint_all_req);
+
+/*
+ * usb_function_unregister() - detach a function driver from the
+ * composite device.
+ *
+ * If the controller is running it is quiesced first: state flags are
+ * cleared under the lock, LPM is exited if needed (registers are only
+ * reachable out of LPM), interrupts are masked and the pullup is
+ * dropped so the host sees a disconnect.  The function is then
+ * deconfigured, unbound, removed from the list and from ui->func[],
+ * and its record freed.  Returns 0 on success, -EINVAL if @func is
+ * NULL or was never registered.
+ */
+int usb_function_unregister(struct usb_function *func)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	struct usb_function_info *fi;
+	unsigned long flags;
+
+	if (!func)
+		return -EINVAL;
+
+	fi = usb_find_function(func->name);
+	if (!fi)
+		return -EINVAL;
+
+	if (ui->running) {
+		disable_irq(ui->irq);
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->running = 0;
+		ui->online = 0;
+		ui->bound = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_uninit(ui);
+		/* we should come out of lpm to access registers */
+		if (ui->in_lpm) {
+			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
+				disable_irq(ui->gpio_irq[0]);
+				disable_irq(ui->gpio_irq[1]);
+			}
+			usb_lpm_exit(ui);
+			/* if a wakeup was still pending, run it now */
+			if (cancel_work_sync(&ui->li.wakeup_phy))
+				usb_lpm_wakeup_phy(NULL);
+			ui->in_lpm = 0;
+		}
+		/* disable usb and session valid interrupts */
+		writel(0, USB_USBINTR);
+		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+
+		/* stop the controller */
+		usb_disable_pullup(ui);
+		/* give the host time to notice the disconnect */
+		msleep(100);
+		enable_irq(ui->irq);
+	}
+
+	pr_info("%s: func->name = %s\n", __func__, func->name);
+
+	ui->composition = NULL;
+
+	if (func->configure)
+		func->configure(0, func->context);
+	if (func->unbind)
+		func->unbind(func->context);
+
+	list_del(&fi->list);
+	for (i = 0; i < ui->num_funcs; i++)
+		if (fi == ui->func[i])
+			ui->func[i] = NULL;
+	kfree(fi);
+	return 0;
+}
+EXPORT_SYMBOL(usb_function_unregister);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/function/msm_hsusb_hw.h b/drivers/usb/function/msm_hsusb_hw.h
new file mode 100644
index 0000000..c016c3f
--- /dev/null
+++ b/drivers/usb/function/msm_hsusb_hw.h
@@ -0,0 +1,163 @@
+/* drivers/usb/function/msm_hsusb_hw.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _USB_FUNCTION_MSM_HSUSB_HW_H
+#define _USB_FUNCTION_MSM_HSUSB_HW_H
+
+#define USB_ID               (MSM_USB_BASE + 0x0000)
+#define USB_HWGENERAL        (MSM_USB_BASE + 0x0004)
+#define USB_HWHOST           (MSM_USB_BASE + 0x0008)
+#define USB_HWDEVICE         (MSM_USB_BASE + 0x000C)
+#define USB_HWTXBUF          (MSM_USB_BASE + 0x0010)
+#define USB_HWRXBUF          (MSM_USB_BASE + 0x0014)
+#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
+
+#define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
+#define USB_HCIVERSION       (MSM_USB_BASE + 0x0102) /* 16 bit */
+#define USB_HCSPARAMS        (MSM_USB_BASE + 0x0104)
+#define USB_HCCPARAMS        (MSM_USB_BASE + 0x0108)
+#define USB_DCIVERSION       (MSM_USB_BASE + 0x0120) /* 16 bit */
+#define USB_USBCMD           (MSM_USB_BASE + 0x0140)
+#define USB_USBSTS           (MSM_USB_BASE + 0x0144)
+#define USB_USBINTR          (MSM_USB_BASE + 0x0148)
+#define USB_FRINDEX          (MSM_USB_BASE + 0x014C)
+#define USB_DEVICEADDR       (MSM_USB_BASE + 0x0154)
+#define USB_ENDPOINTLISTADDR (MSM_USB_BASE + 0x0158)
+#define USB_BURSTSIZE        (MSM_USB_BASE + 0x0160)
+#define USB_TXFILLTUNING     (MSM_USB_BASE + 0x0164)
+#define USB_ULPI_VIEWPORT    (MSM_USB_BASE + 0x0170)
+#define USB_ENDPTNAK         (MSM_USB_BASE + 0x0178)
+#define USB_ENDPTNAKEN       (MSM_USB_BASE + 0x017C)
+#define USB_PORTSC           (MSM_USB_BASE + 0x0184)
+#define USB_OTGSC            (MSM_USB_BASE + 0x01A4)
+#define USB_USBMODE          (MSM_USB_BASE + 0x01A8)
+#define USB_ENDPTSETUPSTAT   (MSM_USB_BASE + 0x01AC)
+#define USB_ENDPTPRIME       (MSM_USB_BASE + 0x01B0)
+#define USB_ENDPTFLUSH       (MSM_USB_BASE + 0x01B4)
+#define USB_ENDPTSTAT        (MSM_USB_BASE + 0x01B8)
+#define USB_ENDPTCOMPLETE    (MSM_USB_BASE + 0x01BC)
+#define USB_ENDPTCTRL(n)     (MSM_USB_BASE + 0x01C0 + (4 * (n)))
+
+
+#define USBCMD_RESET   2
+#define USBCMD_ATTACH  1
+#define USBCMD_ATDTW   (1 << 14)
+
+#define USBMODE_DEVICE 2
+#define USBMODE_HOST   3
+
+/* Endpoint queue head (dQH) consumed directly by the controller's DMA
+ * engine — field order and sizes are fixed by hardware; do not
+ * reorder or repack.  (Presumably the ChipIdea-style device-mode dQH
+ * layout; confirm against the controller datasheet.) */
+struct ept_queue_head
+{
+    unsigned config;
+    unsigned active; /* read-only */
+
+    unsigned next;   /* pointer to the first transfer descriptor */
+    unsigned info;
+    unsigned page0;
+    unsigned page1;
+    unsigned page2;
+    unsigned page3;
+    unsigned page4;
+    unsigned reserved_0;
+
+    /* SETUP packet payload captured by the controller for ep0 */
+    unsigned char setup_data[8];
+
+    unsigned reserved_1;
+    unsigned reserved_2;
+    unsigned reserved_3;
+    unsigned reserved_4;
+};
+
+#define CONFIG_MAX_PKT(n)     ((n) << 16)
+#define CONFIG_ZLT            (1 << 29)    /* stop on zero-len xfer */
+#define CONFIG_IOS            (1 << 15)    /* IRQ on setup */
+
+/* Transfer descriptor (dTD) read by the controller DMA engine —
+ * layout is fixed by hardware; "next" is a bus address or TERMINATE. */
+struct ept_queue_item
+{
+    unsigned next;
+    unsigned info;
+    unsigned page0;
+    unsigned page1;
+    unsigned page2;
+    unsigned page3;
+    unsigned page4;
+    unsigned reserved;
+};
+
+#define TERMINATE 1
+
+#define INFO_BYTES(n)         ((n) << 16)
+#define INFO_IOC              (1 << 15)
+#define INFO_ACTIVE           (1 << 7)
+#define INFO_HALTED           (1 << 6)
+#define INFO_BUFFER_ERROR     (1 << 5)
+#define INFO_TXN_ERROR        (1 << 3)
+
+
+#define STS_NAKI              (1 << 16)  /* */
+#define STS_SLI               (1 << 8)   /* R/WC - suspend state entered */
+#define STS_SRI               (1 << 7)   /* R/WC - SOF recv'd */
+#define STS_URI               (1 << 6)   /* R/WC - RESET recv'd - write to clear */
+#define STS_FRI               (1 << 3)   /* R/WC - Frame List Rollover */
+#define STS_PCI               (1 << 2)   /* R/WC - Port Change Detect */
+#define STS_UEI               (1 << 1)   /* R/WC - USB Error */
+#define STS_UI                (1 << 0)   /* R/WC - USB Transaction Complete */
+
+
+/* bits used in all the endpoint status registers */
+#define EPT_TX(n) (1 << ((n) + 16))
+#define EPT_RX(n) (1 << (n))
+
+
+#define CTRL_TXE              (1 << 23)
+#define CTRL_TXR              (1 << 22)
+#define CTRL_TXI              (1 << 21)
+#define CTRL_TXD              (1 << 17)
+#define CTRL_TXS              (1 << 16)
+#define CTRL_RXE              (1 << 7)
+#define CTRL_RXR              (1 << 6)
+#define CTRL_RXI              (1 << 5)
+#define CTRL_RXD              (1 << 1)
+#define CTRL_RXS              (1 << 0)
+
+#define CTRL_TXT_MASK         (3 << 18)
+#define CTRL_TXT_CTRL         (0 << 18)
+#define CTRL_TXT_ISOCH        (1 << 18)
+#define CTRL_TXT_BULK         (2 << 18)
+#define CTRL_TXT_INT          (3 << 18)
+
+#define CTRL_RXT_MASK         (3 << 2)
+#define CTRL_RXT_CTRL         (0 << 2)
+#define CTRL_RXT_ISOCH        (1 << 2)
+#define CTRL_RXT_BULK         (2 << 2)
+#define CTRL_RXT_INT          (3 << 2)
+
+#define ULPI_WAKEUP           (1 << 31)
+#define ULPI_RUN              (1 << 30)
+#define ULPI_WRITE            (1 << 29)
+#define ULPI_READ             (0 << 29)
+#define ULPI_STATE_NORMAL     (1 << 27)
+#define ULPI_ADDR(n)          (((n) & 255) << 16)
+#define ULPI_DATA(n)          ((n) & 255)
+#define ULPI_DATA_READ(n)     (((n) >> 8) & 255)
+
+/* USB_PORTSC bits for determining port speed */
+#define PORTSC_PSPD_FS        (0 << 26)
+#define PORTSC_PSPD_LS        (1 << 26)
+#define PORTSC_PSPD_HS        (2 << 26)
+#define PORTSC_PSPD_MASK      (3 << 26)
+
+#endif
diff --git a/drivers/usb/function/msm_otg.c b/drivers/usb/function/msm_otg.c
new file mode 100644
index 0000000..c931290
--- /dev/null
+++ b/drivers/usb/function/msm_otg.c
@@ -0,0 +1,368 @@
+/* drivers/usb/otg/msm_otg.c
+ *
+ * OTG Driver for HighSpeed USB
+ *
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/msm_otg.h>
+#include <mach/msm_hsusb.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/board.h>
+
+#define MSM_USB_BASE (xceiv->regs)
+
+#define A_HOST 0
+#define B_DEVICE 1
+#define A_TO_B 0
+#define B_TO_A 1
+
+static struct msm_otg_transceiver *xceiv;
+
+/*
+ * msm_otg_get_transceiver() - hand out the OTG transceiver, taking a
+ * device reference that msm_otg_put_transceiver() must drop.
+ * Returns NULL if the OTG driver has not probed yet.
+ */
+struct msm_otg_transceiver *msm_otg_get_transceiver(void)
+{
+	if (!xceiv)
+		return NULL;
+	get_device(xceiv->dev);
+	return xceiv;
+}
+EXPORT_SYMBOL(msm_otg_get_transceiver);
+
+/*
+ * msm_otg_put_transceiver() - drop the device reference taken by
+ * msm_otg_get_transceiver().  Safe to call with NULL.
+ * Note: the parameter shadows the file-scope "xceiv".
+ */
+void msm_otg_put_transceiver(struct msm_otg_transceiver *xceiv)
+{
+	if (!xceiv)
+		return;
+	put_device(xceiv->dev);
+}
+EXPORT_SYMBOL(msm_otg_put_transceiver);
+
+/* Gate the core clock and the AHB ("pclk") clock together. */
+static void msm_otg_set_clk(int on)
+{
+	if (!on) {
+		clk_disable(xceiv->clk);
+		clk_disable(xceiv->pclk);
+		return;
+	}
+	clk_enable(xceiv->clk);
+	clk_enable(xceiv->pclk);
+}
+
+/* OTGSC.ID reads 0 when the A-plug is attached, i.e. host role. */
+static inline int is_host(void)
+{
+	return (readl(USB_OTGSC) & OTGSC_ID) ? 0 : 1;
+}
+
+/*
+ * msm_otg_enable() - start OTG operation once both host and device
+ * drivers have registered.
+ *
+ * Samples the ID pin to pick the initial role, starts the matching
+ * driver, arms ID-change interrupts and unmasks the OTG IRQ.  Clocks
+ * are enabled only around the register accesses.
+ */
+static void msm_otg_enable(void)
+{
+	msm_otg_set_clk(1);
+	/* Enable ID interrupts */
+	writel(readl(USB_OTGSC) | OTGSC_IDIE, USB_OTGSC);
+
+	if (is_host()) {
+		pr_info("%s: configuring USB in host mode\n", __func__);
+		xceiv->hcd_ops->request(xceiv->hcd_ops->handle, REQUEST_START);
+		xceiv->state = A_HOST;
+	} else {
+		pr_info("%s: configuring USB in device mode\n", __func__);
+		xceiv->dcd_ops->request(xceiv->dcd_ops->handle, REQUEST_START);
+		xceiv->state = B_DEVICE;
+	}
+	msm_otg_set_clk(0);
+	xceiv->active = 1;
+	/* hold the system awake briefly while the role starts up */
+	wake_lock_timeout(&xceiv->wlock, HZ/2);
+	enable_irq(xceiv->irq);
+}
+
+/*
+ * msm_otg_disable() - stop OTG role switching when one side
+ * deregisters.
+ *
+ * Marks the transceiver inactive.  If the deregistering side (@mode)
+ * is the one currently driving the bus, hand the bus to the remaining
+ * side.  The original switch re-tested xceiv->state inside each case
+ * even though the early return already guarantees mode == state;
+ * those dead checks are removed, behavior is unchanged.
+ */
+static void msm_otg_disable(int mode)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&xceiv->lock, flags);
+	xceiv->active = 0;
+	spin_unlock_irqrestore(&xceiv->lock, flags);
+
+	pr_info("%s: OTG is disabled\n", __func__);
+
+	/* only the currently-active role needs a hand-over */
+	if (mode != xceiv->state)
+		return;
+
+	switch (mode) {
+	case A_HOST:
+		pr_info("%s: configuring USB in device mode\n",
+				__func__);
+		xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+						REQUEST_START);
+		xceiv->state = B_DEVICE;
+		break;
+	case B_DEVICE:
+		pr_info("%s: configuring USB in host mode\n",
+				__func__);
+		xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+						REQUEST_START);
+		xceiv->state = A_HOST;
+		break;
+	}
+}
+
+/*
+ * msm_otg_do_work() - deferred role switch, scheduled from the ID
+ * interrupt.
+ *
+ * xceiv->flags carries the requested direction (A_TO_B / B_TO_A); the
+ * old role's driver is stopped before the new one starts.  The IRQ
+ * was disabled by the handler and is re-enabled here once the switch
+ * is done.
+ */
+static void msm_otg_do_work(struct work_struct *w)
+{
+	switch (xceiv->state) {
+	case A_HOST:
+		if (xceiv->flags == A_TO_B) {
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_STOP);
+			pr_info("%s: configuring USB in device mode\n",
+					__func__);
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = B_DEVICE;
+		}
+		break;
+	case B_DEVICE:
+		if (xceiv->flags == B_TO_A) {
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_STOP);
+			pr_info("%s: configuring USB in host mode\n",
+					__func__);
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = A_HOST;
+		}
+		break;
+	}
+	/* keep the system up briefly while the new role settles */
+	wake_lock_timeout(&xceiv->wlock, HZ/2);
+	enable_irq(xceiv->irq);
+}
+
+/*
+ * msm_otg_irq() - OTG interrupt handler (shared line).
+ *
+ * On an ID-pin change (OTGSC_IDIS) it records the requested role
+ * switch in xceiv->flags, schedules the switch work, masks its own
+ * IRQ until the work completes, and acks the status bit.  Spurious
+ * invocations while inactive or in LPM are swallowed.
+ *
+ * Fix: disable_irq() must not be called on the currently-executing
+ * IRQ from inside its own handler — it waits for the handler to
+ * finish and can deadlock; use disable_irq_nosync() instead.
+ */
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	u32 otgsc;
+	u32 temp;
+
+	if (!xceiv->active)
+		return IRQ_HANDLED;
+
+	if (xceiv->in_lpm)
+		return IRQ_HANDLED;
+
+	otgsc = readl(USB_OTGSC);
+	/* keep the enable bits, drop the (write-one-to-clear) status
+	 * bits so only IDIS is acked below */
+	temp = otgsc & ~OTGSC_INTR_STS_MASK;
+	if (otgsc & OTGSC_IDIS) {
+		wake_lock(&xceiv->wlock);
+		if (is_host()) {
+			xceiv->flags = B_TO_A;
+			schedule_work(&xceiv->work);
+		} else {
+			xceiv->flags = A_TO_B;
+			schedule_work(&xceiv->work);
+		}
+		disable_irq_nosync(xceiv->irq);
+		writel(temp | OTGSC_IDIS, USB_OTGSC);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static DEFINE_MUTEX(otg_register_lock);
+
+/*
+ * msm_otg_set_peripheral() - (de)register the device-controller
+ * driver with OTG.
+ *
+ * Passing NULL @ops deregisters the peripheral and hands the bus to
+ * the host side if the peripheral was active.  Registering stops the
+ * peripheral until both sides are present, then msm_otg_enable()
+ * picks the initial role.  Returns 0, -EINVAL, or -EBUSY if a
+ * peripheral is already registered.
+ *
+ * NOTE(review): the @xceiv parameter shadows the file-scope xceiv;
+ * the two are expected to be the same object here.
+ */
+static int msm_otg_set_peripheral(struct msm_otg_transceiver *xceiv,
+					struct msm_otg_ops *ops)
+{
+	int ret = 0;
+
+	mutex_lock(&otg_register_lock);
+	if (!xceiv) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!ops) {
+		xceiv->dcd_ops = NULL;
+		pr_info("%s: Peripheral driver is deregistered with OTG\n",
+				__func__);
+		msm_otg_disable(B_DEVICE);
+		goto unlock;
+	}
+	if (xceiv->dcd_ops) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	xceiv->dcd_ops = ops;
+	/* keep the peripheral stopped until both sides registered */
+	xceiv->dcd_ops->request(xceiv->dcd_ops->handle, REQUEST_STOP);
+	if (xceiv->hcd_ops)
+		msm_otg_enable();
+unlock:
+	mutex_unlock(&otg_register_lock);
+	return ret;
+}
+
+/*
+ * msm_otg_set_host() - (de)register the host-controller driver with
+ * OTG.  Mirror image of msm_otg_set_peripheral(): NULL @hcd_ops
+ * deregisters and falls back to device mode if the host was active;
+ * registering stops the host until both sides are present.  Returns
+ * 0, -EINVAL, or -EBUSY if a host is already registered.
+ */
+static int msm_otg_set_host(struct msm_otg_transceiver *xceiv,
+				struct msm_otg_ops *hcd_ops)
+{
+	int ret = 0;
+
+	mutex_lock(&otg_register_lock);
+	if (!xceiv) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!hcd_ops) {
+		xceiv->hcd_ops = NULL;
+		pr_info("%s: Host driver is deregistered with OTG\n",
+				__func__);
+		msm_otg_disable(A_HOST);
+		goto unlock;
+	}
+	if (xceiv->hcd_ops) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	xceiv->hcd_ops = hcd_ops;
+	/* keep the host stopped until both sides registered */
+	xceiv->hcd_ops->request(xceiv->hcd_ops->handle, REQUEST_STOP);
+	if (xceiv->dcd_ops)
+		msm_otg_enable();
+
+unlock:
+	mutex_unlock(&otg_register_lock);
+	return ret;
+}
+
+/*
+ * msm_otg_set_suspend() - record the transceiver's low-power state.
+ *
+ * Callers invoke this through otg->set_suspend, so @otg is the
+ * transceiver itself; use it instead of reaching for the file-scope
+ * xceiv as the original did (same object, but consistent with the
+ * other transceiver ops).  Always returns 0.
+ */
+static int msm_otg_set_suspend(struct msm_otg_transceiver *otg, int suspend)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->in_lpm = suspend;
+	spin_unlock_irqrestore(&otg->lock, flags);
+	return 0;
+}
+
+/*
+ * msm_otg_probe() - acquire clocks, registers and the OTG IRQ, and
+ * publish the transceiver ops.
+ *
+ * Fixes over the original:
+ *  - the pclk clk_get() result was validated with IS_ERR(xceiv->clk)
+ *    (copy/paste), so a failed "usb_hs_pclk" lookup went unnoticed;
+ *  - platform_get_irq() reports failure as a negative errno, which
+ *    "if (!xceiv->irq)" never caught;
+ *  - the file-scope xceiv pointer was left dangling on error paths.
+ */
+static int __init msm_otg_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	xceiv = kzalloc(sizeof(struct msm_otg_transceiver), GFP_KERNEL);
+	if (!xceiv)
+		return -ENOMEM;
+
+	xceiv->clk = clk_get(NULL, "usb_hs_clk");
+	if (IS_ERR(xceiv->clk)) {
+		ret = PTR_ERR(xceiv->clk);
+		goto free_xceiv;
+	}
+	xceiv->pclk = clk_get(NULL, "usb_hs_pclk");
+	if (IS_ERR(xceiv->pclk)) {
+		ret = PTR_ERR(xceiv->pclk);
+		goto put_clk;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		goto put_pclk;
+	}
+
+	xceiv->regs = ioremap(res->start, resource_size(res));
+	if (!xceiv->regs) {
+		ret = -ENOMEM;
+		goto put_pclk;
+	}
+	xceiv->irq = platform_get_irq(pdev, 0);
+	if (xceiv->irq <= 0) {
+		ret = xceiv->irq ? xceiv->irq : -ENODEV;
+		goto free_regs;
+	}
+
+	/* disable interrupts before requesting irq */
+	msm_otg_set_clk(1);
+	writel(0, USB_USBINTR);
+	writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC);
+	msm_otg_set_clk(0);
+
+	ret = request_irq(xceiv->irq, msm_otg_irq, IRQF_SHARED,
+					"msm_otg", pdev);
+	if (ret)
+		goto free_regs;
+	/* kept masked until msm_otg_enable() arms ID interrupts */
+	disable_irq(xceiv->irq);
+
+	INIT_WORK(&xceiv->work, msm_otg_do_work);
+	spin_lock_init(&xceiv->lock);
+	wake_lock_init(&xceiv->wlock, WAKE_LOCK_SUSPEND, "usb_otg");
+	wake_lock(&xceiv->wlock);
+
+	xceiv->set_host = msm_otg_set_host;
+	xceiv->set_peripheral = msm_otg_set_peripheral;
+	xceiv->set_suspend = msm_otg_set_suspend;
+
+	return 0;
+free_regs:
+	iounmap(xceiv->regs);
+put_pclk:
+	clk_put(xceiv->pclk);
+put_clk:
+	clk_put(xceiv->clk);
+free_xceiv:
+	kfree(xceiv);
+	xceiv = NULL;	/* don't leave a dangling file-scope pointer */
+	return ret;
+}
+
+/* Tear down in reverse probe order: pending work, IRQ, register
+ * mapping, clocks, then the transceiver state structure. */
+static int __exit msm_otg_remove(struct platform_device *pdev)
+{
+	cancel_work_sync(&xceiv->work);
+	free_irq(xceiv->irq, pdev);
+	iounmap(xceiv->regs);
+	clk_put(xceiv->pclk);
+	clk_put(xceiv->clk);
+	kfree(xceiv);
+	return 0;
+}
+
+/* No .probe here: the driver is registered with
+ * platform_driver_probe() because msm_otg_probe() is __init. */
+static struct platform_driver msm_otg_driver = {
+	.remove = __exit_p(msm_otg_remove),
+	.driver = {
+		.name = "msm_hsusb_otg",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Registered at subsys_initcall time so the transceiver exists before
+ * the host/device controller drivers look for it. */
+static int __init msm_otg_init(void)
+{
+	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
+}
+
+/* Module exit: unregistering the driver invokes msm_otg_remove(). */
+static void __exit msm_otg_exit(void)
+{
+	platform_driver_unregister(&msm_otg_driver);
+}
+
+subsys_initcall(msm_otg_init);
+module_exit(msm_otg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB OTG driver");
+MODULE_VERSION("1.00");
diff --git a/drivers/usb/function/null.c b/drivers/usb/function/null.c
new file mode 100644
index 0000000..68f1e35
--- /dev/null
+++ b/drivers/usb/function/null.c
@@ -0,0 +1,118 @@
+/* driver/usb/function/null.c
+ *
+ * Null Function Device - A Data Sink
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+/* Per-function state for the null (data sink) function: one OUT
+ * endpoint and two preallocated 4K requests kept permanently queued. */
+struct null_context
+{
+	struct usb_endpoint *out;
+	struct usb_request *req0;
+	struct usb_request *req1;
+};
+
+static struct null_context _context;
+
+/*
+ * null_bind() - claim the single OUT endpoint and preallocate two 4K
+ * requests so one can be refilled while the other is in flight.
+ */
+static void null_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+
+	ctxt->out = ept[0];
+	printk(KERN_INFO "null_bind() %p\n", ctxt->out);
+	ctxt->req0 = usb_ept_alloc_req(ctxt->out, 4096);
+	ctxt->req1 = usb_ept_alloc_req(ctxt->out, 4096);
+}
+
+/*
+ * null_unbind() - release the preallocated requests and drop the
+ * endpoint.  Pointers are cleared with NULL rather than the integer
+ * literal 0 used originally (same behavior, idiomatic C).
+ */
+static void null_unbind(void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+
+	printk(KERN_INFO "null_unbind()\n");
+	if (ctxt->req0) {
+		usb_ept_free_req(ctxt->out, ctxt->req0);
+		ctxt->req0 = NULL;
+	}
+	if (ctxt->req1) {
+		usb_ept_free_req(ctxt->out, ctxt->req1);
+		ctxt->req1 = NULL;
+	}
+	ctxt->out = NULL;
+}
+
+
+static void null_queue_out(struct null_context *ctxt, struct usb_request *req);
+
+/*
+ * null_out_complete() - completion callback: discard the received
+ * data and requeue the request, unless the endpoint disappeared
+ * (-ENODEV).  The unused "data" local from the original is removed.
+ */
+static void null_out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct null_context *ctxt = req->context;
+
+	if (req->status != -ENODEV)
+		null_queue_out(ctxt, req);
+}
+
+/*
+ * null_queue_out() - (re)arm a 4K OUT transfer; completion requeues
+ * through null_out_complete, keeping the sink permanently primed.
+ */
+static void null_queue_out(struct null_context *ctxt, struct usb_request *req)
+{
+	req->length = 4096;
+	req->context = ctxt;
+	req->complete = null_out_complete;
+	usb_ept_queue_xfer(ctxt->out, req);
+}
+
+/*
+ * null_configure() - start sinking data when the host configures us.
+ * On deconfigure the controller driver cancels all pending requests,
+ * so nothing needs to happen here in that case.
+ */
+static void null_configure(int configured, void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+
+	printk(KERN_INFO "null_configure() %d\n", configured);
+	if (!configured)
+		return;
+	null_queue_out(ctxt, ctxt->req0);
+	null_queue_out(ctxt, ctxt->req1);
+}
+
+/* Function descriptor for the null sink: vendor-specific interface
+ * (class 0xff) with a single bulk-OUT endpoint. */
+static struct usb_function usb_func_null = {
+	.bind = null_bind,
+	.unbind = null_unbind,
+	.configure = null_configure,
+
+	.name = "null",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xfe,
+	.ifc_protocol = 0x01,
+
+	.ifc_name = "null",
+
+	.ifc_ept_count = 1,
+	.ifc_ept_type = { EPT_BULK_OUT },
+};
+
+/*
+ * null_init() - register the null function with the framework.
+ * Propagates the registration result instead of discarding it as the
+ * original did, so module load reflects failure.
+ */
+static int __init null_init(void)
+{
+	printk(KERN_INFO "null_init()\n");
+	return usb_function_register(&usb_func_null);
+}
+
+module_init(null_init);
diff --git a/drivers/usb/function/rmnet.c b/drivers/usb/function/rmnet.c
new file mode 100644
index 0000000..e618ec0
--- /dev/null
+++ b/drivers/usb/function/rmnet.c
@@ -0,0 +1,1086 @@
+/*
+ * rmnet.c -- RmNet function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+
+#include <mach/msm_smd.h>
+#include <linux/usb/cdc.h>
+
+#include "usb_function.h"
+
+static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
+module_param(rmnet_ctl_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");
+
+static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
+module_param(rmnet_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
+
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+#define QMI_REQ_MAX		4
+#define QMI_REQ_SIZE		2048
+#define QMI_RESP_MAX		8
+#define QMI_RESP_SIZE		2048
+
+#define RX_REQ_MAX		8
+#define RX_REQ_SIZE		2048
+#define TX_REQ_MAX		8
+#define TX_REQ_SIZE		2048
+
+#define TXN_MAX 		2048
+
+/* Vendor-specific interface with three endpoints: bulk IN, bulk OUT
+ * and an interrupt IN notify endpoint.
+ */
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support (endpoint addresses filled in at bind time) */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	/* 1 << 5 = 32; FS interrupt bInterval is in frames */
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+/* High speed support (endpoint addresses filled in at bind time) */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	/* HS interrupt interval is 2^(bInterval-1) microframes */
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+/* QMI requests & responses buffer*/
+struct qmi_buf {
+	void *buf;		/* kmalloc'd message storage */
+	int len;		/* valid bytes in buf */
+	struct list_head list;	/* pool or pending-queue linkage */
+};
+
+/* Control & data SMD channel private data */
+struct rmnet_smd_info {
+	struct smd_channel 	*ch;
+	struct tasklet_struct	tx_tlet;	/* SMD -> USB direction */
+	struct tasklet_struct	rx_tlet;	/* USB -> SMD direction */
+/* bit in flags: set once SMD_EVENT_OPEN has arrived */
+#define CH_OPENED	0
+	unsigned long		flags;
+	/* pending rx packet length */
+	atomic_t		rx_pkt;
+	/* wait for smd open event*/
+	wait_queue_head_t	wait;
+};
+
+/* Per-function state: endpoints, buffer pools, SMD channels and the
+ * workqueue used for (dis)connect processing.
+ */
+struct rmnet_dev {
+	struct usb_endpoint	*epout;		/* bulk OUT (host -> device) */
+	struct usb_endpoint	*epin;		/* bulk IN (device -> host) */
+	struct usb_endpoint	*epnotify;	/* interrupt IN */
+	struct usb_request 	*notify_req;
+
+	u8			ifc_id;
+	/* QMI lists */
+	struct list_head	qmi_req_pool;
+	struct list_head	qmi_resp_pool;
+	struct list_head	qmi_req_q;
+	struct list_head	qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head 	tx_idle;
+	struct list_head 	rx_idle;
+	struct list_head	rx_queue;
+
+	spinlock_t		lock;	/* guards all lists above */
+	atomic_t		online;
+	atomic_t		notify_count;	/* pending RESPONSE_AVAILABLE */
+
+	struct rmnet_smd_info	smd_ctl;
+	struct rmnet_smd_info	smd_data;
+
+	struct workqueue_struct *wq;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+};
+
+static struct usb_function rmnet_function;
+
+/* Allocate a QMI message buffer of @len bytes.  Returns the buffer on
+ * success or ERR_PTR(-ENOMEM) on allocation failure.
+ */
+struct qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
+	if (qmi == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	qmi->buf = kmalloc(len, kmalloc_flags);
+	if (qmi->buf == NULL) {
+		kfree(qmi);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return qmi;
+}
+
+/* Free a QMI buffer allocated by rmnet_alloc_qmi(). */
+void rmnet_free_qmi(struct qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or ERR_PTR(-ENOMEM) if there is an error.
+ */
+struct usb_request *
+rmnet_alloc_req(struct usb_endpoint *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req = usb_ept_alloc_req(ep, 0);
+
+	if (req == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	req->buf = kmalloc(len, kmalloc_flags);
+	if (req->buf == NULL) {
+		usb_ept_free_req(ep, req);
+		return ERR_PTR(-ENOMEM);
+	}
+	req->length = len;
+
+	return req;
+}
+
+/*
+ * Free a usb_request allocated by rmnet_alloc_req() together with its
+ * kmalloc'd transfer buffer.
+ */
+void rmnet_free_req(struct usb_endpoint *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ept_free_req(ep, req);
+}
+
+/* Completion handler for the interrupt/notify endpoint.  While
+ * notify_count indicates more queued QMI responses, resend the
+ * RESPONSE_AVAILABLE notification; on disconnect-type errors reset the
+ * counter instead.
+ */
+static void rmnet_notify_complete(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		pr_err("%s: rmnet notify ep error %d\n", __func__, status);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != dev->epnotify)
+			break;
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
+		if (status) {
+			/* undo the count for the resend that failed */
+			atomic_dec(&dev->notify_count);
+			pr_err("%s: rmnet notify ep enqueue error %d\n",
+					__func__, status);
+		}
+		break;
+	}
+}
+
+/* Tell the host a QMI response is waiting by queuing a CDC
+ * RESPONSE_AVAILABLE notification on the interrupt endpoint.  Only the
+ * caller that takes notify_count from 0 to 1 queues the transfer;
+ * subsequent responses are signalled by rmnet_notify_complete()
+ * draining the counter.
+ */
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		pr_err("%s: rmnet notify ep enqueue error %d\n",
+				__func__, status);
+	}
+}
+
+/* TODO
+ * handle modem restart events
+ */
+/* SMD event callback (shared by the control and data channels).
+ * Schedules the rx tasklet when a stalled write can now proceed, and
+ * the tx tasklet when incoming data is available.
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_smd_info *smd_info = priv;
+	int len = atomic_read(&smd_info->rx_pkt);
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
+
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* usb endpoints are not enabled until smd channels
+		 * are opened. wake up worker thread to continue
+		 * connection processing
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We will never come here.
+		 * reset flags after closing smd channel
+		 * */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
+/* Tasklet: move complete QMI packets from the control SMD channel into
+ * the response queue and notify the host ("tx" is device-to-host).
+ * Stops when no complete packet is available or the response pool is
+ * exhausted.
+ */
+static void rmnet_control_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct qmi_buf *qmi_resp;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(dev->smd_ctl.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_ctl.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_resp_pool)) {
+			pr_err("%s: rmnet QMI Tx buffers full\n", __func__);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
+				struct qmi_buf, list);
+		list_del(&qmi_resp->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/* read outside the lock; the buffer is privately owned here */
+		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_response_available(dev);
+	}
+
+}
+
+/* Tasklet: drain queued host QMI requests into the control SMD channel
+ * ("rx" is host-to-device).  When SMD lacks write room, record the
+ * pending length in rx_pkt and stop; rmnet_smd_notify() reschedules us
+ * once space frees up.
+ */
+static void rmnet_control_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct qmi_buf *qmi_req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+
+		if (list_empty(&dev->qmi_req_q)) {
+			atomic_set(&dev->smd_ctl.rx_pkt, 0);
+			break;
+		}
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+				struct qmi_buf, list);
+		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
+			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
+			pr_debug("%s: rmnet control smd channel full\n",
+					__func__);
+			break;
+		}
+
+		list_del(&qmi_req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != qmi_req->len) {
+			/* NOTE(review): on a short write the buffer is not
+			 * returned to qmi_req_pool and is lost from the pool
+			 * — confirm smd_write cannot partially fail here.
+			 */
+			pr_err("%s: rmnet control smd write failed\n",
+					__func__);
+			break;
+		}
+
+		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* EP0 OUT data-stage completion for SEND_ENCAPSULATED_COMMAND.
+ * Forward the QMI request straight to the control SMD channel when it
+ * has room and nothing is already queued; otherwise copy it into a
+ * pool buffer and queue it for the control rx tasklet.  Finish with a
+ * zero-length ACK on EP0 IN.
+ */
+static void rmnet_command_complete(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_function *func = &rmnet_function;
+	struct usb_request *in_req;
+	struct qmi_buf *qmi_req;
+	int ret;
+
+	if (req->status < 0) {
+		pr_err("%s: rmnet command error %d\n", __func__, req->status);
+		return;
+	}
+
+	/* NOTE(review): plain spin_lock here vs spin_lock_irqsave in the
+	 * tasklets — presumably this completion runs with interrupts
+	 * disabled; confirm against the controller driver.
+	 */
+	spin_lock(&dev->lock);
+	/* no pending control rx packet */
+	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
+		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
+			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			pr_err("%s: rmnet control smd write failed\n",
+					__func__);
+		goto ep0_ack;
+	}
+queue_req:
+	if (list_empty(&dev->qmi_req_pool)) {
+		spin_unlock(&dev->lock);
+		pr_err("%s: rmnet QMI pool is empty\n", __func__);
+		return;
+	}
+
+	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
+	list_del(&qmi_req->list);
+	spin_unlock(&dev->lock);
+	/* copy outside the lock; the pool buffer is privately owned here */
+	memcpy(qmi_req->buf, req->buf, req->actual);
+	qmi_req->len = req->actual;
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+ep0_ack:
+	/* Send ACK on EP0 IN */
+	in_req = func->ep0_in_req;
+	in_req->length = 0;
+	in_req->complete = 0;
+	usb_ept_queue_xfer(func->ep0_in, in_req);
+}
+
+/* EP0 class-request handler.  Implements the CDC encapsulated-command
+ * pair that carries QMI messages: SEND_ENCAPSULATED_COMMAND
+ * (host -> device, payload handled in rmnet_command_complete()) and
+ * GET_ENCAPSULATED_RESPONSE (device -> host).  Returns the data-stage
+ * length on success or a negative error (which stalls ep0).
+ */
+static int rmnet_setup(struct usb_ctrlrequest *ctrl, void *buf,
+				int len, void *context)
+{
+	struct rmnet_dev *dev = context;
+	struct usb_request *req = rmnet_function.ep0_out_req;
+	int			ret = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+	struct qmi_buf *resp;
+	int schedule = 0;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != dev->ifc_id)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != dev->ifc_id)
+			goto invalid;
+		else {
+			spin_lock(&dev->lock);
+			/* Guard against the host polling when no response
+			 * is queued: list_first_entry() on an empty list
+			 * would yield a bogus pointer.
+			 */
+			if (list_empty(&dev->qmi_resp_q)) {
+				spin_unlock(&dev->lock);
+				pr_err("%s: no queued QMI response\n",
+						__func__);
+				break;
+			}
+			resp = list_first_entry(&dev->qmi_resp_q,
+					struct qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+			memcpy(buf, resp->buf, resp->len);
+			ret = resp->len;
+			spin_lock(&dev->lock);
+
+			/* returning a buffer to an exhausted pool may
+			 * unblock the control tx tasklet
+			 */
+			if (list_empty(&dev->qmi_resp_pool))
+				schedule = 1;
+			list_add_tail(&resp->list, &dev->qmi_resp_pool);
+
+			if (schedule)
+				tasklet_schedule(&dev->smd_ctl.tx_tlet);
+			spin_unlock(&dev->lock);
+		}
+		break;
+	default:
+
+invalid:
+		pr_debug("%s: invalid control req%02x.%02x v%04x i%04x l%d\n",
+			__func__, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	return ret;
+}
+
+/* Queue every idle rx request on the bulk OUT endpoint.  Stops at the
+ * first enqueue failure, returning that request to the idle pool.
+ */
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	int status;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+
+		/* drop the lock across the controller call */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ept_queue_xfer(dev->epout, req);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			pr_err("%s: rmnet data rx enqueue err %d\n",
+					__func__, status);
+			list_add_tail(&req->list, &dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Tasklet: forward complete network packets from the data SMD channel
+ * to the host over bulk IN ("tx" is device-to-host).  Stops when no
+ * complete packet is pending or the tx request pool is exhausted; the
+ * bulk IN completion handler reschedules us when requests free up.
+ */
+static void rmnet_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+
+		sz = smd_cur_packet_size(dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			pr_debug("%s: rmnet data Tx buffers full\n", __func__);
+			break;
+		}
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
+		status = usb_ept_queue_xfer(dev->epin, req);
+		if (status) {
+			pr_err("%s: rmnet tx data enqueue err %d\n",
+					__func__, status);
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+	}
+
+}
+
+/* Tasklet: drain queued host packets into the data SMD channel ("rx"
+ * is host-to-device).  When SMD lacks write room, record the pending
+ * length in rx_pkt and stop; rmnet_smd_notify() reschedules us once
+ * space frees up.  Finally rearm the bulk OUT endpoint.
+ */
+static void rmnet_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (list_empty(&dev->rx_queue)) {
+			atomic_set(&dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			pr_debug("%s: rmnet SMD data channel full\n", __func__);
+			break;
+		}
+
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			/* NOTE(review): on a short write the request is not
+			 * returned to rx_idle and is lost from the pool —
+			 * confirm smd_write cannot partially fail here.
+			 */
+			pr_err("%s: rmnet SMD data write failed\n", __func__);
+			break;
+		}
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* We have free rx data requests. */
+	rmnet_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue to rx_queue.
+ * We will not write into SMD directly until rx_queue is
+ * empty to strictly follow the ordering of requests.
+ */
+static void rmnet_complete_epout(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+	int ret;
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		pr_err("%s: response error %d, %d/%d\n",
+			__func__, status, req->actual,
+			req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	/* rx_pkt == 0 means nothing is queued waiting for SMD room */
+	if (!atomic_read(&dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			pr_err("%s: rmnet data smd write failed\n", __func__);
+		/* Restart Rx */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_start_rx(dev);
+		return;
+	}
+queue_req:
+	list_add_tail(&req->list, &dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+/* Bulk IN completion: return the tx request to the idle pool and kick
+ * the data tx tasklet if the pool had been exhausted (the tasklet
+ * stops when tx_idle runs dry).
+ */
+static void rmnet_complete_epin(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+	int schedule = 0;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		pr_err("%s: rmnet data tx ep error %d\n", __func__, status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &dev->tx_idle);
+
+		if (schedule)
+			tasklet_schedule(&dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
+/* Worker: tear down after deconfigure.  Kill all four tasklets, return
+ * queued buffers/requests to their pools and close both SMD channels.
+ * Fix: the data tx tasklet was never killed — the smd_data.rx_tlet
+ * kill was duplicated instead.
+ */
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+					disconnect_work);
+
+	atomic_set(&dev->notify_count, 0);
+
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+
+	list_for_each_safe(act, tmp, &dev->rx_queue) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	smd_close(dev->smd_ctl.ch);
+	dev->smd_ctl.flags = 0;
+
+	smd_close(dev->smd_data.ch);
+	dev->smd_data.flags = 0;
+}
+
+/* Worker: bring up both SMD channels, then configure and enable the
+ * USB endpoints and start queuing Rx transfers.  Runs on dev->wq after
+ * the host selects the configuration (smd_open may block/sleep).
+ */
+static void rmnet_connect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
+	int ret;
+
+	/* Control channel for QMI messages */
+	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
+			&dev->smd_ctl, rmnet_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open control smd channel\n", __func__);
+		return;
+	}
+	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
+				&dev->smd_ctl.flags));
+
+	/* Data channel for network packets */
+	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
+			&dev->smd_data, rmnet_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open data smd channel\n", __func__);
+		smd_close(dev->smd_ctl.ch);
+		/* Bail out: waiting below for a channel that never
+		 * opened would block this worker forever.
+		 */
+		return;
+	}
+	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
+				&dev->smd_data.flags));
+
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->epin, &rmnet_hs_in_desc);
+		usb_configure_endpoint(dev->epout, &rmnet_hs_out_desc);
+		usb_configure_endpoint(dev->epnotify, &rmnet_hs_notify_desc);
+	} else {
+		usb_configure_endpoint(dev->epin, &rmnet_fs_in_desc);
+		usb_configure_endpoint(dev->epout, &rmnet_fs_out_desc);
+		usb_configure_endpoint(dev->epnotify, &rmnet_fs_notify_desc);
+	}
+
+	usb_ept_enable(dev->epin,  1);
+	usb_ept_enable(dev->epout, 1);
+	usb_ept_enable(dev->epnotify, 1);
+
+	atomic_set(&dev->online, 1);
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+}
+
+/* Configuration callback from the function framework.  On configure,
+ * defer channel/endpoint bring-up to the workqueue (smd_open may
+ * block); on deconfigure, flush and disable the endpoints and schedule
+ * cleanup on the same workqueue.
+ */
+static void rmnet_configure(int configured, void *context)
+
+{
+	struct rmnet_dev *dev = context;
+
+	if (configured) {
+		queue_work(dev->wq, &dev->connect_work);
+	} else {
+		/* all pending requests will be canceled */
+		if (!atomic_read(&dev->online))
+			return;
+
+		atomic_set(&dev->online, 0);
+
+		usb_ept_fifo_flush(dev->epnotify);
+		usb_ept_enable(dev->epnotify, 0);
+
+		usb_ept_fifo_flush(dev->epout);
+		usb_ept_enable(dev->epout, 0);
+
+		usb_ept_fifo_flush(dev->epin);
+		usb_ept_enable(dev->epin, 0);
+
+		/* cleanup work */
+		queue_work(dev->wq, &dev->disconnect_work);
+	}
+
+}
+
+/* Free all QMI pool buffers, the bulk data requests and the notify
+ * request.  Fix: tx requests are used on the bulk-in endpoint and rx
+ * requests on the bulk-out endpoint, so free them against those
+ * endpoints (the epin/epout arguments were swapped).
+ */
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+
+	/* free all usb requests in tx pool (bulk-in endpoint) */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all usb requests in rx pool (bulk-out endpoint) */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi response pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+}
+
+/* Bind callback: allocate the three endpoints, the notify request, the
+ * QMI buffer pools and the bulk data requests.  Fixes: stray double
+ * semicolon after a goto, and TX requests were allocated against the
+ * OUT endpoint although they are queued on the bulk IN endpoint.
+ */
+static void rmnet_bind(void *context)
+{
+	struct rmnet_dev *dev = context;
+	int i, ret;
+	struct usb_request *req;
+	struct qmi_buf *qmi;
+
+	dev->ifc_id = usb_msm_get_next_ifc_number(&rmnet_function);
+	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+	/*Configuring IN Endpoint*/
+	dev->epin = usb_alloc_endpoint(USB_DIR_IN);
+	if (!dev->epin)
+		return;
+
+	rmnet_hs_in_desc.bEndpointAddress = USB_DIR_IN |
+					dev->epin->num;
+	rmnet_fs_in_desc.bEndpointAddress = USB_DIR_IN |
+					dev->epin->num;
+
+	/*Configuring OUT Endpoint*/
+	dev->epout = usb_alloc_endpoint(USB_DIR_OUT);
+	if (!dev->epout)
+		goto free_epin;
+
+	rmnet_hs_out_desc.bEndpointAddress = USB_DIR_OUT |
+					dev->epout->num;
+	rmnet_fs_out_desc.bEndpointAddress = USB_DIR_OUT |
+					dev->epout->num;
+
+	/*Configuring NOTIFY Endpoint*/
+	dev->epnotify = usb_alloc_endpoint(USB_DIR_IN);
+	if (!dev->epnotify)
+		goto free_epout;
+
+	rmnet_hs_notify_desc.bEndpointAddress = USB_DIR_IN |
+				dev->epnotify->num;
+	rmnet_fs_notify_desc.bEndpointAddress = USB_DIR_IN |
+				dev->epnotify->num;
+
+	dev->notify_req = usb_ept_alloc_req(dev->epnotify, 0);
+	if (!dev->notify_req)
+		goto free_epnotify;
+
+	dev->notify_req->buf = kmalloc(RMNET_MAX_NOTIFY_SIZE, GFP_KERNEL);
+	if (!dev->notify_req->buf)
+		goto free_buf;
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
+
+	/* Allocate the qmi request and response buffers */
+	for (i = 0; i < QMI_REQ_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	for (i = 0; i < QMI_RESP_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	/* Allocate bulk in/out requests for data transfer */
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->length = TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		/* tx requests are queued on the bulk IN endpoint, so
+		 * allocate them there (was dev->epout)
+		 */
+		req = rmnet_alloc_req(dev->epin, TX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->context = dev;
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+
+	pr_info("Rmnet function bind completed\n");
+
+	return;
+
+free_buf:
+	rmnet_free_buf(dev);
+free_epnotify:
+	usb_free_endpoint(dev->epnotify);
+free_epout:
+	usb_free_endpoint(dev->epout);
+free_epin:
+	usb_free_endpoint(dev->epin);
+
+}
+
+/* Unbind callback: stop all tasklets and pending work, then free the
+ * buffers, endpoints and device state.  Fix: the data tx tasklet was
+ * never killed — the smd_data.rx_tlet kill was duplicated instead.
+ */
+static void rmnet_unbind(void *context)
+{
+	struct rmnet_dev *dev = context;
+
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+	flush_workqueue(dev->wq);
+
+	rmnet_free_buf(dev);
+	usb_free_endpoint(dev->epin);
+	usb_free_endpoint(dev->epout);
+	usb_free_endpoint(dev->epnotify);
+
+	kfree(dev);
+
+}
+/* Function driver callbacks; descriptor tables and the context pointer
+ * are filled in by rmnet_init().
+ */
+static struct usb_function rmnet_function = {
+	.bind = rmnet_bind,
+	.configure = rmnet_configure,
+	.unbind = rmnet_unbind,
+	.setup  = rmnet_setup,
+	.name = "rmnet",
+};
+
+/* Null-terminated descriptor tables handed to the function framework;
+ * populated in rmnet_init().  Made static: they are only referenced in
+ * this file and should not leak into the kernel global namespace.
+ */
+static struct usb_descriptor_header *rmnet_hs_descriptors[5];
+static struct usb_descriptor_header *rmnet_fs_descriptors[5];
+/* Module init: allocate device state, the workqueue, tasklets, wait
+ * queues and list heads, build the full/high speed descriptor tables
+ * and register the function with the framework.
+ */
+static int __init rmnet_init(void)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->smd_ctl.rx_pkt, 0);
+	atomic_set(&dev->smd_data.rx_pkt, 0);
+
+	INIT_WORK(&dev->connect_work, rmnet_connect_work);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+
+	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&dev->smd_ctl.wait);
+	init_waitqueue_head(&dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&dev->qmi_req_pool);
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_pool);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->rx_queue);
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	/* interface descriptor first, then endpoints, NULL-terminated */
+	rmnet_hs_descriptors[0] =
+		(struct usb_descriptor_header *)&rmnet_interface_desc;
+	rmnet_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&rmnet_hs_in_desc;
+	rmnet_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&rmnet_hs_out_desc;
+	rmnet_hs_descriptors[3] =
+		(struct usb_descriptor_header *)&rmnet_hs_notify_desc;
+	rmnet_hs_descriptors[4] = NULL;
+
+	rmnet_fs_descriptors[0] =
+		(struct usb_descriptor_header *)&rmnet_interface_desc;
+	rmnet_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&rmnet_fs_in_desc;
+	rmnet_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&rmnet_fs_out_desc;
+	rmnet_fs_descriptors[3] =
+		(struct usb_descriptor_header *)&rmnet_fs_notify_desc;
+	rmnet_fs_descriptors[4] = NULL;
+
+	rmnet_function.hs_descriptors = rmnet_hs_descriptors;
+	rmnet_function.fs_descriptors = rmnet_fs_descriptors;
+	rmnet_function.context = dev;
+
+	ret = usb_function_register(&rmnet_function);
+	if (ret)
+		goto free_wq;
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+/* Module unload: unregister the function (unbind frees resources). */
+static void __exit rmnet_exit(void)
+{
+	usb_function_unregister(&rmnet_function);
+}
+
+module_init(rmnet_init);
+module_exit(rmnet_exit);
+MODULE_DESCRIPTION("RmNet usb function driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/serial.c b/drivers/usb/function/serial.c
new file mode 100644
index 0000000..0539351
--- /dev/null
+++ b/drivers/usb/function/serial.c
@@ -0,0 +1,2252 @@
+/*
+ * serial.c -- USB Serial Function driver
+ *
+ * Copyright 2003 (C) Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This code is based in part on the Gadget Zero driver, which
+ * is Copyright (C) 2003 by David Brownell, all rights reserved.
+ *
+ * This code also borrows from usbserial.c, which is
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/wait.h>
+#include <linux/serial.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/cdc.h>
+#include "usb_function.h"
+
+#include <linux/workqueue.h>
+/* Defines */
+
+#define GS_VERSION_STR			"v2.2"
+#define GS_VERSION_NUM			0x0202
+
+#define GS_LONG_NAME			"Serial Function"
+#define GS_SHORT_NAME			"serial"
+
+/* number of serial instances to expose (module parameter, capped at
+ * MAX_INSTANCES in gs_module_init) */
+static int instances = 2;
+#define MAX_INSTANCES 2
+
+/* NOTE(review): hard-coded tty major number -- confirm 127 does not
+ * collide with another driver on the target platform */
+#define GS_MAJOR			127
+#define GS_MINOR_START			0
+
+#define GS_NUM_PORTS			16
+
+#define GS_NO_CONFIG_ID			0
+#define GS_ACM_CONFIG_ID		2
+
+#define GS_MAX_DESC_LEN			256
+
+/* defines for maintaining serial states */
+#define	MSR_CTS		(1 << 4)
+#define	MSR_DSR		(1 << 5)
+#define	MSR_RI		(1 << 6)
+#define	MSR_CD		(1 << 7)
+#define	MCR_DTR		(1 << 0)
+#define	MCR_RTS		(1 << 1)
+#define	MCR_LOOP	(1 << 4)
+
+/* USB CDC control line state defines */
+#define USB_CDC_SET_CONTROL_LINE_STATE_DTR 0x1
+#define USB_CDC_SET_CONTROL_LINE_STATE_RTS 0x2
+
+/* default number of queued usb_requests per direction */
+#define GS_DEFAULT_READ_Q_SIZE		16
+#define GS_DEFAULT_WRITE_Q_SIZE		16
+#define GS_DEFAULT_INT_REQ		1
+
+#define GS_DEFAULT_WRITE_BUF_SIZE	8192
+#define GS_TMP_BUF_SIZE			8192
+
+/* seconds to wait for the write buffer to drain in gs_close() */
+#define GS_CLOSE_TIMEOUT		15
+
+#define GS_DEFAULT_USE_ACM		0
+
+#define GS_DEFAULT_DTE_RATE		9600
+#define GS_DEFAULT_DATA_BITS		8
+#define GS_DEFAULT_PARITY		USB_CDC_NO_PARITY
+#define GS_DEFAULT_CHAR_FORMAT		USB_CDC_1_STOP_BITS
+
+/* #define GS_DEBUG */
+
+/* debug settings */
+#ifdef GS_DEBUG
+static int debug = 1;
+
+#define gs_debug(format, arg...) \
+	do { if (debug) printk(KERN_DEBUG format, ## arg); } while (0)
+#define gs_debug_level(level, format, arg...) \
+	do { if (debug >= level) printk(KERN_DEBUG format, ## arg); } while (0)
+
+#else
+
+#define gs_debug(format, arg...) \
+	do { } while (0)
+#define gs_debug_level(level, format, arg...) \
+	do { } while (0)
+
+#endif /* GS_DEBUG */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		8
+/* values for gs_dev.configured */
+#define SERIAL_CONFIGURED        1
+#define SERIAL_UNCONFIGURED      0
+
+/* Structures */
+
+struct gs_dev;
+
+/* circular buffer */
+struct gs_buf {
+	unsigned int buf_size;	/* capacity of buf_buf in bytes */
+	char *buf_buf;		/* backing storage */
+	char *buf_get;		/* consumer (read) position */
+	char *buf_put;		/* producer (write) position */
+};
+
+/* list of requests */
+struct gs_req_entry {
+	struct list_head re_entry;
+	struct usb_request *re_req;
+};
+
+/* the port structure holds info for each port, one for each minor number */
+struct gs_port {
+	struct gs_dev *port_dev;	/* pointer to device struct */
+	struct tty_struct *port_tty;	/* pointer to tty struct */
+	spinlock_t port_lock;
+	struct mutex	mutex_lock;	/* protect open/close */
+	int port_num;
+	int port_open_count;	/* nested open count; >0 means open */
+	int port_in_use;	/* open/close in progress */
+	wait_queue_head_t port_write_wait;	/* waiting to write */
+	struct gs_buf *port_write_buf;	/* staged TX data, drained by gs_send */
+	struct usb_cdc_line_coding port_line_coding;
+	struct list_head        read_pool;	/* idle RX requests */
+	struct list_head        read_queue;	/* completed RX awaiting tty push */
+	struct list_head	write_pool;	/* idle TX requests */
+	unsigned                n_read;	/* bytes of head RX req already pushed */
+	unsigned int msr;	/* modem status shadow (MSR_* bits) */
+	unsigned int prev_msr;	/* last reported modem status */
+	unsigned int mcr;	/* modem control shadow (MCR_* bits) */
+	struct work_struct push_work;	/* defers tty push to gs_rx_push() */
+};
+
+/*-------------------------------------------------------------*/
+/*Allocate DMA buffer in non interrupt context(gs_bind)*/
+
+struct gs_reqbuf {
+	void *buf;
+};
+
+/*-------------------------------------------------------------*/
+
+/* the device structure holds info for the USB device */
+struct gs_dev {
+	/* lock for set/reset config */
+	spinlock_t dev_lock;
+	/* configuration number */
+	int dev_config;
+	/* address of notify endpoint */
+	struct usb_endpoint *dev_notify_ep;
+	/* address of in endpoint */
+	struct usb_endpoint *dev_in_ep;
+	struct usb_request *notify_req;	/* interrupt-IN notification request */
+	unsigned long notify_queued;	/* nonzero while notify_req is queued */
+	/* address of out endpoint */
+	struct usb_endpoint *dev_out_ep;
+	/* list of write requests */
+	struct list_head dev_req_list;
+	/* round robin port scheduled */
+	int dev_sched_port;
+	struct gs_port *dev_port[GS_NUM_PORTS];	/* the ports */
+	struct gs_reqbuf statusreqbuf;	/* buffer for notify endpoint data */
+	u16 interface_num;	/* interface number assigned by the framework */
+
+	/*interface, endpoint descriptors*/
+	struct usb_interface_descriptor gs_ifc_desc;
+	struct usb_endpoint_descriptor gs_hs_bulkin_desc, gs_fs_bulkin_desc;
+	struct usb_endpoint_descriptor gs_hs_bulkout_desc, gs_fs_bulkout_desc;
+	struct usb_endpoint_descriptor gs_hs_notifyin_desc, gs_fs_notifyin_desc;
+	struct usb_descriptor_header **gs_fullspeed_header;
+	struct usb_descriptor_header **gs_highspeed_header;
+
+	struct usb_function *func;	/* back-pointer to registered function */
+	int configured;		/* SERIAL_CONFIGURED / SERIAL_UNCONFIGURED */
+	int bound;		/* set once gs_bind has run */
+};
+
+/* Functions */
+
+/* module */
+static int __init gs_module_init(void);
+static void __exit gs_module_exit(void);
+
+static void send_notify_data(struct usb_endpoint *ep, struct usb_request *req);
+/* tty driver */
+static int gs_open(struct tty_struct *tty, struct file *file);
+static void gs_close(struct tty_struct *tty, struct file *file);
+static int gs_write(struct tty_struct *tty,
+		    const unsigned char *buf, int count);
+static int gs_put_char(struct tty_struct *tty, unsigned char ch);
+static void gs_flush_chars(struct tty_struct *tty);
+static int gs_write_room(struct tty_struct *tty);
+static int gs_chars_in_buffer(struct tty_struct *tty);
+static void gs_throttle(struct tty_struct *tty);
+static void gs_unthrottle(struct tty_struct *tty);
+static int gs_break(struct tty_struct *tty, int break_state);
+static int gs_ioctl(struct tty_struct *tty, struct file *file,
+		    unsigned int cmd, unsigned long arg);
+static void gs_set_termios(struct tty_struct *tty, struct ktermios *old);
+static unsigned gs_start_rx(struct gs_dev *dev);
+
+static int gs_send(struct gs_dev *dev);
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size);
+static void gs_read_complete(struct usb_endpoint *ep, struct usb_request *req);
+static void gs_write_complete(struct usb_endpoint *ep, struct usb_request *req);
+static int gs_tiocmget(struct tty_struct *tty, struct file *file);
+static int gs_tiocmset(struct tty_struct *tty, struct file *file,
+			unsigned int set, unsigned int clear);
+
+/* Function driver */
+static void gs_bind(void *);
+static void gs_unbind(void *);
+static int gs_setup(struct usb_ctrlrequest *req,
+		void *buf, int len, void *_ctxt);
+
+static void gs_configure(int config, void *_ctxt);
+static void gs_disconnect(void *_ctxt);
+static void gs_reset_config(struct gs_dev *dev);
+
+static struct usb_request *gs_alloc_req(struct usb_endpoint *ep,
+					unsigned int len);
+static void gs_free_req(struct usb_endpoint *ep, struct usb_request *req);
+
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
+static void gs_free_ports(struct gs_dev *dev);
+
+/* circular buffer */
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags);
+static void gs_buf_free(struct gs_buf *gb);
+static void gs_buf_clear(struct gs_buf *gb);
+static unsigned int gs_buf_data_avail(struct gs_buf *gb);
+static unsigned int gs_buf_space_avail(struct gs_buf *gb);
+static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf,
+			       unsigned int count);
+static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
+			       unsigned int count);
+
+/* Globals */
+/* one gs_dev per instance, allocated in gs_module_init() */
+static struct gs_dev **gs_devices;
+
+/* serializes open/close per port, indexed by minor number */
+static struct semaphore gs_open_close_sem[GS_NUM_PORTS];
+
+static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
+static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
+
+static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
+
+/* single-threaded workqueue running gs_rx_push() */
+static struct workqueue_struct *gs_tty_wq;
+
+
+/* tty driver struct */
+/* operations wired into the tty core; throttle/break/ioctl/set_termios
+ * are stubs (see their definitions below) */
+static const struct tty_operations gs_tty_ops = {
+	.open = gs_open,
+	.close = gs_close,
+	.write = gs_write,
+	.put_char = gs_put_char,
+	.flush_chars = gs_flush_chars,
+	.write_room = gs_write_room,
+	.ioctl = gs_ioctl,
+	.set_termios = gs_set_termios,
+	.throttle = gs_throttle,
+	.unthrottle = gs_unthrottle,
+	.break_ctl = gs_break,
+	.chars_in_buffer = gs_chars_in_buffer,
+	.tiocmget = gs_tiocmget,
+	.tiocmset = gs_tiocmset,
+};
+/* allocated and registered in gs_module_init() */
+static struct tty_driver *gs_tty_driver;
+
+/* Function  driver struct */
+static struct usb_function usb_function_serial[2];
+
+/* NOTE(review): these two globals are not static and are not referenced
+ * anywhere in this file's visible code -- confirm they are needed */
+struct usb_function *global_func_serial;
+struct gs_dev **dum_device;
+
+/* Module */
+MODULE_DESCRIPTION(GS_LONG_NAME);
+MODULE_AUTHOR("Al Borchers");
+MODULE_LICENSE("GPL");
+
+#ifdef GS_DEBUG
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
+#endif
+
+/* Fix: the descriptions previously claimed default=32, but the actual
+ * defaults are GS_DEFAULT_READ_Q_SIZE / GS_DEFAULT_WRITE_Q_SIZE == 16. */
+module_param(read_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(read_q_size, "Read request queue size, default=16");
+
+module_param(write_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_q_size, "Write request queue size, default=16");
+
+module_param(write_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
+
+module_param(instances, int, 0);
+MODULE_PARM_DESC(instances, "Number of serial instances");
+
+module_init(gs_module_init);
+module_exit(gs_module_exit);
+
+/******************************************************************************/
+
+/*
+ * CDC-ACM Class specific Descriptors
+ */
+
+/* CDC header functional descriptor, bcdCDC 1.10 */
+static const struct usb_cdc_header_desc gs_header_desc = {
+	.bLength = sizeof(gs_header_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_HEADER_TYPE,
+	.bcdCDC = __constant_cpu_to_le16(0x0110),
+};
+
+/* call management: device handles nothing itself (bmCapabilities = 0) */
+static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = {
+	.bLength = sizeof(gs_call_mgmt_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities = 0,
+	.bDataInterface = 0,
+};
+
+static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
+	.bLength = sizeof(gs_acm_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_ACM_TYPE,
+	.bmCapabilities = 3,  /* bits should be 00000011 (refer to 5.2.3.3) */
+};
+
+/* union descriptor: single interface acts as both master and slave */
+static const struct usb_cdc_union_desc gs_union_desc = {
+	.bLength = sizeof(gs_union_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_UNION_TYPE,
+	.bMasterInterface0 = 0,
+	.bSlaveInterface0 = 0,
+};
+
+/*
+ * Fill in the single data interface descriptor: 3 endpoints
+ * (bulk-in, bulk-out, interrupt-in).  NOTE(review): the class codes are
+ * vendor-specific rather than CDC-ACM even though ACM functional
+ * descriptors exist above -- confirm this matches the host driver's
+ * expectations.
+ */
+static void gs_init_ifc_desc(struct usb_interface_descriptor *ifc_desc)
+{
+	ifc_desc->bLength =		USB_DT_INTERFACE_SIZE;
+	ifc_desc->bDescriptorType =	USB_DT_INTERFACE;
+	ifc_desc->bNumEndpoints =	3;
+	ifc_desc->bInterfaceClass =	USB_CLASS_VENDOR_SPEC;
+	ifc_desc->bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC;
+	ifc_desc->bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC;
+	ifc_desc->iInterface =		0;
+}
+
+#define HIGHSPEED	1
+#define	FULLSPEED	2
+
+#define BULK	1
+#define INTERRUPT	2
+/*
+ * Fill in an endpoint descriptor for the given transfer type and speed:
+ * bulk gets 512-byte (HS) or 64-byte (FS) max packet, interrupt gets a
+ * 64-byte max packet with bInterval 4.  NOTE(review): wMaxPacketSize is
+ * assigned in CPU byte order; USB descriptors are little-endian, so this
+ * relies on a little-endian CPU (true for MSM/ARM here) -- confirm.
+ */
+static void gs_init_ep_desc(struct usb_endpoint_descriptor *ep_desc,
+				unsigned type, unsigned speed)
+{
+	ep_desc->bLength =		USB_DT_ENDPOINT_SIZE;
+	ep_desc->bDescriptorType =	USB_DT_ENDPOINT;
+
+	if (type == BULK) {
+		ep_desc->bmAttributes = USB_ENDPOINT_XFER_BULK;
+		if (speed == HIGHSPEED)
+			ep_desc->wMaxPacketSize = 512;
+		else
+			ep_desc->wMaxPacketSize = 64;
+	} else {
+
+		ep_desc->bmAttributes = USB_ENDPOINT_XFER_INT;
+		ep_desc->wMaxPacketSize = 64;
+		ep_desc->bInterval = 4;
+	}
+}
+
+/*
+ * Populate the per-device full/high-speed descriptor arrays:
+ * interface, bulk-in, bulk-out, interrupt-in, NULL terminator.
+ * The arrays themselves are kmalloc'ed (5 entries) in gs_module_init().
+ */
+static void gs_init_header_desc(struct gs_dev *dev)
+{
+	dev->gs_highspeed_header[0] =
+		(struct usb_descriptor_header *)&dev->gs_ifc_desc;
+	dev->gs_highspeed_header[1] =
+		(struct usb_descriptor_header *)&dev->gs_hs_bulkin_desc;
+	dev->gs_highspeed_header[2] =
+		(struct usb_descriptor_header *)&dev->gs_hs_bulkout_desc;
+	dev->gs_highspeed_header[3] =
+		(struct usb_descriptor_header *)&dev->gs_hs_notifyin_desc;
+	dev->gs_highspeed_header[4] = NULL;
+
+	dev->gs_fullspeed_header[0] =
+		(struct usb_descriptor_header *)&dev->gs_ifc_desc;
+	dev->gs_fullspeed_header[1] =
+		(struct usb_descriptor_header *)&dev->gs_fs_bulkin_desc;
+	dev->gs_fullspeed_header[2] =
+		(struct usb_descriptor_header *)&dev->gs_fs_bulkout_desc;
+	dev->gs_fullspeed_header[3] =
+		(struct usb_descriptor_header *)&dev->gs_fs_notifyin_desc;
+	dev->gs_fullspeed_header[4] = NULL;
+}
+
+/*****************************************************************************/
+/*
+ *  gs_module_init
+ *
+ *  Register as a USB gadget driver and a tty driver.
+ */
+
+/* per-instance USB function names; index matches usb_function_serial[].
+ * NOTE(review): could be static const; short global name "a" pollutes
+ * the namespace */
+char *a[] = {"modem", "nmea"};
+
+static int __init gs_module_init(void)
+{
+	int i, retval;
+	struct usb_function *func;
+
+	if (instances > MAX_INSTANCES || instances == 0) {
+		printk(KERN_ERR "Incorrect instances entered \n");
+		return -ENODEV;
+	}
+
+	gs_tty_wq = create_singlethread_workqueue("gs_tty");
+	if (gs_tty_wq == 0)
+		return -ENOMEM;
+	gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
+	if (!gs_tty_driver) {
+		destroy_workqueue(gs_tty_wq);
+		return -ENOMEM;
+	}
+	gs_tty_driver->owner = THIS_MODULE;
+	gs_tty_driver->driver_name = GS_SHORT_NAME;
+	gs_tty_driver->name = "ttyHSUSB";
+	gs_tty_driver->major = GS_MAJOR;
+	gs_tty_driver->minor_start = GS_MINOR_START;
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags =  TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+				| TTY_DRIVER_RESET_TERMIOS;
+	gs_tty_driver->init_termios = tty_std_termios;
+	gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL
+	    | CLOCAL;
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	for (i = 0; i < GS_NUM_PORTS; i++)
+		sema_init(&gs_open_close_sem[i], 1);
+
+	retval = tty_register_driver(gs_tty_driver);
+	if (retval) {
+		/*usb_function_unregister(&usb_func_serial); */
+		/* NOTE(review): gs_tty_wq is not destroyed on this path */
+		put_tty_driver(gs_tty_driver);
+		printk(KERN_ERR
+		       "gs_module_init: cannot register tty driver,ret = %d\n",
+		       retval);
+		return retval;
+	}
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_register_device(gs_tty_driver, i, NULL);
+
+	gs_devices = kzalloc(sizeof(struct gs_dev *) * instances,
+				GFP_KERNEL);
+	/* NOTE(review): failure paths from here on leak the workqueue,
+	 * tty driver registration and earlier allocations -- no unwind */
+	if (!gs_devices)
+		return -ENOMEM;
+
+	for (i = 0; i < instances; i++) {
+		func = &usb_function_serial[i];
+
+		gs_devices[i] = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
+		if (!gs_devices[i])
+			return -ENOMEM;
+		spin_lock_init(&gs_devices[i]->dev_lock);
+		INIT_LIST_HEAD(&gs_devices[i]->dev_req_list);
+		gs_devices[i]->func = func;
+		/*1 - Interface, 3 Endpoints-> Total 4 + 1 for NULL*/
+		/* NOTE(review): both kmalloc results below are unchecked;
+		 * a failure would oops in gs_init_header_desc() */
+		gs_devices[i]->gs_fullspeed_header =
+		kmalloc(sizeof(struct usb_descriptor_header *) * 5, GFP_KERNEL);
+		gs_devices[i]->gs_highspeed_header =
+		kmalloc(sizeof(struct usb_descriptor_header *) * 5, GFP_KERNEL);
+
+		gs_init_ifc_desc(&gs_devices[i]->gs_ifc_desc);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_bulkin_desc, BULK,
+				HIGHSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_bulkout_desc, BULK,
+				HIGHSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_notifyin_desc, INTERRUPT,
+				HIGHSPEED);
+
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_bulkin_desc, BULK,
+				FULLSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_bulkout_desc, BULK,
+				FULLSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_notifyin_desc, INTERRUPT,
+				FULLSPEED);
+		gs_init_header_desc(gs_devices[i]);
+
+		/*Initializing Directions*/
+		gs_devices[i]->gs_hs_bulkin_desc.bEndpointAddress = USB_DIR_IN;
+		gs_devices[i]->gs_hs_bulkout_desc.bEndpointAddress =
+								USB_DIR_OUT;
+		gs_devices[i]->gs_hs_notifyin_desc.bEndpointAddress =
+								USB_DIR_IN;
+		gs_devices[i]->gs_fs_bulkin_desc.bEndpointAddress = USB_DIR_IN;
+		gs_devices[i]->gs_fs_bulkout_desc.bEndpointAddress =
+								USB_DIR_OUT;
+		gs_devices[i]->gs_fs_notifyin_desc.bEndpointAddress =
+								USB_DIR_IN;
+
+		func->bind = gs_bind;
+		func->unbind = gs_unbind;
+		func->configure = gs_configure;
+		func->disconnect = gs_disconnect;
+		func->setup = gs_setup;
+		func->name = a[i];
+		func->context = gs_devices[i];
+		func->fs_descriptors = gs_devices[i]->gs_fullspeed_header;
+		func->hs_descriptors = gs_devices[i]->gs_highspeed_header;
+
+		retval = usb_function_register(func);
+		if (retval) {
+			/* NOTE(review): previously registered instances are
+			 * left registered on this error path */
+			printk(KERN_ERR
+	      "gs_module_init: cannot register Function driver, ret = %d\n",
+			       retval);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/*
+* gs_module_exit
+*
+* Unregister as a tty driver and a USB gadget driver.
+*/
+/* Unregister every function instance, free the per-device descriptor
+ * arrays and device structs, then tear down the tty devices and driver.
+ * NOTE(review): gs_tty_wq is never destroyed here -- leak on unload. */
+static void __exit gs_module_exit(void)
+{
+	int i;
+	for (i = 0; i < instances; i++)
+		usb_function_unregister(&usb_function_serial[i]);
+
+	for (i = 0; i < instances; ++i) {
+		kfree(gs_devices[i]->gs_fullspeed_header);
+		kfree(gs_devices[i]->gs_highspeed_header);
+		kfree(gs_devices[i]);
+	}
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_unregister_device(gs_tty_driver, i);
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	printk(KERN_INFO "gs_module_exit: %s %s unloaded\n", GS_LONG_NAME,
+	       GS_VERSION_STR);
+}
+
+/* TTY Driver */
+/*
+ * gs_open
+ */
+/*
+ * tty open: select the gs_dev by tty->index, allocate the write buffer
+ * on first open, mark the port open and kick off RX.  Serialized per
+ * port by gs_open_close_sem.  NOTE(review): the index is range-checked
+ * against GS_NUM_PORTS (16) but gs_devices only has `instances` (<= 2)
+ * entries -- confirm minors > MAX_INSTANCES can never reach here
+ * (only MAX_INSTANCES tty devices are registered).
+ */
+static int gs_open(struct tty_struct *tty, struct file *file)
+{
+	int port_num;
+	unsigned long flags;
+	struct gs_port *port;
+	struct gs_dev *dev;
+	struct gs_buf *buf;
+	struct semaphore *sem;
+	int ret;
+
+	port_num = tty->index;
+
+	gs_debug("gs_open: (%d,%p,%p)\n", port_num, tty, file);
+
+	if (port_num < 0 || port_num >= GS_NUM_PORTS) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) invalid port number\n",
+		       port_num, tty, file);
+		return -ENODEV;
+	}
+
+	dev = gs_devices[tty->index];
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL device pointer\n",
+		       port_num, tty, file);
+		return -ENODEV;
+	}
+
+	sem = &gs_open_close_sem[port_num];
+	if (down_interruptible(sem)) {
+		printk(KERN_ERR
+	       "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
+		       port_num, tty, file);
+		return -ERESTARTSYS;
+	}
+
+	spin_lock_irqsave(&dev->dev_lock, flags);
+	/* only port 0 of each device is used (see gs_send_packet) */
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL port pointer\n",
+		       port_num, tty, file);
+		ret = -ENODEV;
+		goto exit_unlock_dev;
+	}
+
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (1)\n",
+		       port_num, tty, file);
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	/* nested open: just bump the count */
+	if (port->port_open_count > 0) {
+		++port->port_open_count;
+		gs_debug("gs_open: (%d,%p,%p) already open\n",
+			 port_num, tty, file);
+		ret = 0;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = NULL;
+
+	/* mark port as in use, we can drop port lock and sleep if necessary */
+	port->port_in_use = 1;
+
+	/* allocate write buffer on first open */
+	if (port->port_write_buf == NULL) {
+		/* drop the spinlock: gs_buf_alloc may sleep (GFP_KERNEL) */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		buf = gs_buf_alloc(write_buf_size, GFP_KERNEL);
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		/* might have been disconnected while asleep, check */
+		if (port->port_dev == NULL) {
+			printk(KERN_ERR
+			       "gs_open: (%d,%p,%p) port disconnected (2)\n",
+			       port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -EIO;
+			goto exit_unlock_port;
+		}
+
+		port->port_write_buf = buf;
+		if (port->port_write_buf == NULL) {
+			printk(KERN_ERR
+	       "gs_open: (%d,%p,%p) cannot allocate port write buffer\n",
+			       port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -ENOMEM;
+			goto exit_unlock_port;
+		}
+
+	}
+
+	/* wait for carrier detect (not implemented) */
+
+	/* might have been disconnected while asleep, check */
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (3)\n",
+		       port_num, tty, file);
+		port->port_in_use = 0;
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = port;
+	port->port_tty = tty;
+	port->port_tty->low_latency = 1;
+	port->port_open_count = 1;
+	port->port_in_use = 0;
+
+	gs_debug("gs_open: (%d,%p,%p) completed\n", port_num, tty, file);
+	/* Queue RX requests */
+	port->n_read = 0;
+	gs_start_rx(dev);
+
+	ret = 0;
+
+exit_unlock_port:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	up(sem);
+	return ret;
+
+exit_unlock_dev:
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+	up(sem);
+	return ret;
+
+}
+
+/*
+ * gs_close
+ */
+
+/* true when the port is disconnected or its write buffer has drained;
+ * evaluated under port_lock so it is safe as a wait_event condition */
+#define GS_WRITE_FINISHED_EVENT_SAFELY(p)			\
+({								\
+	int cond;						\
+								\
+	spin_lock_irq(&(p)->port_lock);				\
+	cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf); \
+	spin_unlock_irq(&(p)->port_lock);			\
+	cond;							\
+})
+
+/*
+ * tty close: on final close, wait up to GS_CLOSE_TIMEOUT seconds for
+ * the write buffer to drain, then flush the OUT fifo and detach the
+ * tty.  A port whose device is already gone is freed here.
+ */
+static void gs_close(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port = tty->driver_data;
+	struct semaphore *sem;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_close: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
+
+	sem = &gs_open_close_sem[port->port_num];
+	down(sem);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR
+		       "gs_close: (%d,%p,%p) port is already closed\n",
+		       port->port_num, tty, file);
+		goto exit;
+	}
+
+	/* nested close: just drop the count */
+	if (port->port_open_count > 1) {
+		--port->port_open_count;
+		goto exit;
+	}
+
+	/* free disconnected port on final close */
+	if (port->port_dev == NULL)
+		goto exit;
+
+
+	/* mark port as closed but in use, we can drop port lock */
+	/* and sleep if necessary */
+	port->port_in_use = 1;
+	port->port_open_count = 0;
+
+	/* wait for write buffer to drain, or */
+	/* at most GS_CLOSE_TIMEOUT seconds */
+	if (gs_buf_data_avail(port->port_write_buf) > 0) {
+		spin_unlock_irq(&port->port_lock);
+		wait_event_interruptible_timeout(port->port_write_wait,
+						 GS_WRITE_FINISHED_EVENT_SAFELY
+						 (port), GS_CLOSE_TIMEOUT * HZ);
+		spin_lock_irq(&port->port_lock);
+	}
+
+	/* free disconnected port on final close */
+	/* (might have happened during the above sleep) */
+	if (port->port_dev == NULL)
+		goto exit;
+
+
+	gs_buf_clear(port->port_write_buf);
+
+	/* Flush bulk-out pipe */
+	usb_ept_fifo_flush(port->port_dev->dev_out_ep);
+	tty->driver_data = NULL;
+	port->port_tty = NULL;
+	port->port_in_use = 0;
+
+	gs_debug("gs_close: (%d,%p,%p) completed\n", port->port_num, tty, file);
+
+exit:
+	spin_unlock_irq(&port->port_lock);
+	up(sem);
+	/* NOTE(review): port_dev is re-read here after dropping the lock;
+	 * confirm no other path can free or resurrect the port between
+	 * the unlock and this kfree */
+	if (port->port_dev == NULL)
+		kfree(port);
+}
+
+/*
+ * gs_write
+ */
+/*
+ * tty write: stage up to `count` bytes in the port's circular write
+ * buffer and, if the device is configured, start draining it via
+ * gs_send().  Returns the number of bytes accepted (possibly fewer
+ * than requested) or a negative errno.
+ */
+static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+	int ret;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_write: NULL port pointer\n");
+		return -EIO;
+	}
+
+	gs_debug("gs_write: (%d,%p) writing %d bytes\n", port->port_num, tty,
+		 count);
+
+	if (count == 0)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		ret = -EIO;
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		ret = -EBADF;
+		goto exit;
+	}
+
+	/* gs_buf_put returns how much actually fit */
+	count = gs_buf_put(port->port_write_buf, buf, count);
+
+
+	/* NOTE(review): gs_send is called with port_lock held (taken with
+	 * irqsave) but internally drops/retakes it with plain spin_unlock/
+	 * spin_lock -- confirm this nesting is intentional */
+	if (port->port_dev->dev_config)
+		gs_send(gs_devices[tty->index]);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write: (%d,%p) wrote %d bytes\n", port->port_num, tty,
+		 count);
+
+	return count;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * gs_put_char
+ */
+/*
+ * tty put_char: stage a single byte in the write buffer without
+ * starting a transfer (the tty core follows up with flush_chars).
+ * Returns 1 if the byte was buffered, 0 otherwise.
+ */
+static int gs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_put_char: NULL port pointer\n");
+		goto out;
+	}
+
+	gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n",
+		 port->port_num, tty, ch, __builtin_return_address(0),
+		 __builtin_return_address(1), __builtin_return_address(2));
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		goto exit_unlock;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		goto exit_unlock;
+	}
+
+	ret = gs_buf_put(port->port_write_buf, &ch, 1);
+
+exit_unlock:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+	return ret;
+}
+
+/*
+ * gs_flush_chars
+ */
+/*
+ * tty flush_chars: push any bytes staged by gs_put_char out to the
+ * bulk-in endpoint via gs_send(), if the device is configured.
+ */
+static void gs_flush_chars(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_flush_chars: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR
+		       "gs_flush_chars: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_flush_chars: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		goto exit;
+	}
+
+	if (port->port_dev->dev_config)
+		gs_send(gs_devices[tty->index]);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+
+	return;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_write_room
+ */
+/*
+ * tty write_room: free space remaining in the port's circular write
+ * buffer; 0 when the port is gone, closed, or unallocated.
+ */
+static int gs_write_room(struct tty_struct *tty)
+{
+
+	int room = 0;
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev != NULL && port->port_open_count > 0
+	    && port->port_write_buf != NULL)
+		room = gs_buf_space_avail(port->port_write_buf);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write_room: (%d,%p) room=%d\n", port->port_num, tty, room);
+
+	return room;
+}
+
+/*
+ * gs_chars_in_buffer
+ */
+/*
+ * tty chars_in_buffer: bytes still staged in the circular write buffer
+ * awaiting transmission; 0 when the port is gone, closed, or
+ * unallocated.
+ */
+static int gs_chars_in_buffer(struct tty_struct *tty)
+{
+	int chars = 0;
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev != NULL && port->port_open_count > 0
+	    && port->port_write_buf != NULL)
+		chars = gs_buf_data_avail(port->port_write_buf);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+		 port->port_num, tty, chars);
+
+	return chars;
+}
+
+/*
+ * gs_throttle
+ */
+/* tty throttle: intentionally a no-op -- the RX path checks the
+ * TTY_THROTTLED flag directly (see gs_read_complete / gs_rx_push) */
+static void gs_throttle(struct tty_struct *tty)
+{
+}
+
+/*
+ * gs_unthrottle
+ */
+/*
+ * tty unthrottle: re-schedule gs_rx_push() to resume handing queued RX
+ * data to the tty.  NOTE(review): unlike the other tty callbacks,
+ * driver_data is dereferenced without a NULL check here -- confirm the
+ * tty core cannot call unthrottle before open completes.
+ */
+static void gs_unthrottle(struct tty_struct *tty)
+{
+	struct gs_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	queue_work(gs_tty_wq, &port->push_work);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_break
+ */
+/* tty break_ctl: break signalling is not supported; report success */
+static int gs_break(struct tty_struct *tty, int break_state)
+{
+	return 0;
+}
+
+/*
+ * gs_ioctl
+ */
+/* tty ioctl: no driver-specific ioctls; let the tty core handle it */
+static int gs_ioctl(struct tty_struct *tty, struct file *file,
+		    unsigned int cmd, unsigned long arg)
+{
+	/* could not handle ioctl */
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * gs_set_termios
+ */
+/* tty set_termios: line coding changes are ignored (USB link is
+ * independent of the emulated serial settings) */
+static void gs_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+}
+
+/*
+* gs_send
+*
+* This function finds available write requests, calls
+* gs_send_packet to fill these packets with data, and
+* continues until either there are no more write requests
+* available or no more data to send.  This function is
+* run whenever data arrives or write requests are available.
+*/
+/*
+ * Drain the port's write buffer into idle bulk-in requests and queue
+ * them, until either the pool or the buffer is empty.  Called with
+ * port->port_lock held; the lock is dropped around usb_ept_queue_xfer.
+ * When the last packet was exactly max-packet-sized, a zero-length
+ * packet is queued so the host sees end-of-transfer.
+ */
+static int gs_send(struct gs_dev *dev)
+{
+	struct gs_port *port = dev->dev_port[0];
+	struct list_head *pool = &port->write_pool;
+	int status = 0;
+	/* NOTE(review): function-static, so this ZLP tracking state is
+	 * shared by ALL gs_dev instances -- with two instances active the
+	 * short-packet bookkeeping can be wrong; consider per-port state */
+	static long prev_len;
+	bool do_tty_wake = false;
+	struct usb_endpoint *ep = dev->dev_in_ep;
+
+	while (!list_empty(pool)) {
+		struct usb_request *req;
+		int len;
+		req = list_entry(pool->next, struct usb_request, list);
+		len = gs_send_packet(dev, req->buf, usb_ept_get_max_packet(ep));
+		if (len == 0) {
+			/* Queue zero length packet */
+			if (prev_len == usb_ept_get_max_packet(ep)) {
+				req->length = 0;
+				list_del(&req->list);
+
+				spin_unlock(&port->port_lock);
+				status = usb_ept_queue_xfer(ep, req);
+				spin_lock(&port->port_lock);
+				if (status) {
+					printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+					list_add(&req->list, pool);
+				}
+				prev_len = 0;
+			}
+			wake_up_interruptible(&port->port_write_wait);
+			break;
+		}
+		do_tty_wake = true;
+
+		req->length = len;
+		list_del(&req->list);
+
+		/* Drop lock while we call out of driver; completions
+		 * could be issued while we do so.  Disconnection may
+		 * happen too; maybe immediately before we queue this!
+		 * NOTE that we may keep sending data for a while after
+		 * the TTY closed (dev->ioport->port_tty is NULL).
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ept_queue_xfer(ep, req);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+			list_add(&req->list, pool);
+			break;
+		}
+		prev_len = req->length;
+
+	}
+
+	if (do_tty_wake && port->port_tty)
+		tty_wakeup(port->port_tty);
+	return status;
+
+}
+
+/*
+ * gs_send_packet
+ *
+ * If there is data to send, a packet is built in the given
+ * buffer and the size is returned.  If there is no data to
+ * send, 0 is returned.  If there is any error a negative
+ * error number is returned.
+ *
+ * Called during USB completion routine, on interrupt time.
+ *
+ * We assume that disconnect will not happen until all completion
+ * routines have completed, so we can assume that the dev_port
+ * array does not change during the lifetime of this function.
+ */
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size)
+{
+	unsigned int len;
+	struct gs_port *port;
+
+	if (dev == NULL) {
+		/* Fix: message previously named "gs_recv_packet" (copy/paste) */
+		printk(KERN_ERR "gs_send_packet: NULL device pointer\n");
+		return -EIO;
+	}
+
+	/* TEMPORARY -- only port 0 is supported right now */
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR
+		       "gs_send_packet: port=%d, NULL port pointer\n", 0);
+		return -EIO;
+	}
+
+	/* copy at most `size` buffered bytes into the packet */
+	len = gs_buf_data_avail(port->port_write_buf);
+	if (len < size)
+		size = len;
+	if (size != 0)
+		size = gs_buf_get(port->port_write_buf, packet, size);
+
+	/* buffer space was freed; let writers make progress */
+	if (port->port_tty)
+		tty_wakeup(port->port_tty);
+
+	return size;
+}
+
+/*
+ * Workqueue handler: hand completed RX requests to the tty flip buffer.
+ * A partially-consumed request's progress is tracked in port->n_read so
+ * a throttled tty can resume mid-packet.  Requests are recycled to
+ * read_pool and RX is restarted before returning.
+ */
+static void gs_rx_push(struct work_struct *work)
+{
+	struct gs_port *port = container_of(work,
+					struct gs_port,
+					push_work);
+	struct tty_struct *tty;
+	struct list_head *queue = &port->read_queue;
+	bool do_push = false;
+	struct gs_dev *dev = port->port_dev;
+
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
+
+		req = list_first_entry(queue, struct usb_request, list);
+
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
+
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+			/*printk(KERN_INFO "tty_push:%d\n",size);*/
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count == 0)
+				printk(KERN_INFO "%s: tty buffer is full: throttle\n",
+							__func__);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+	}
+	if (tty && do_push) {
+		/* tty_flip_buffer_push may sleep/reenter; drop the lock */
+		spin_unlock_irq(&port->port_lock);
+		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+		/* tty may have been closed */
+		tty = port->port_tty;
+	}
+	/* leftover data and an unthrottled tty: reschedule ourselves */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				queue_work(gs_tty_wq, &port->push_work);
+		}
+	}
+	gs_start_rx(dev);
+	spin_unlock_irq(&port->port_lock);
+}
+
+/*
+* gs_read_complete
+*/
+/*
+ * gs_read_complete
+ *
+ * OUT-endpoint completion: on success the request is moved to the read
+ * queue and the push worker is scheduled; on errors the request is
+ * recycled (or freed on disconnect).
+ */
+static void gs_read_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	/* used global variable */
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	struct tty_struct *tty;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_read_complete: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+	switch (req->status) {
+	case 0:
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_queue);
+		/* sample port_tty under the lock; it is NULL once the TTY
+		 * is closed, and the original dereferenced it unchecked */
+		tty = port->port_tty;
+		if (tty && !test_bit(TTY_THROTTLED, &tty->flags))
+			queue_work(gs_tty_wq, &port->push_work);
+		spin_unlock(&port->port_lock);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_read_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	case -ENODEV:
+		/* recycle under the lock (the pool is lock-protected) */
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_pool);
+		spin_unlock(&port->port_lock);
+		/* Implemented handling in future if needed */
+		break;
+	default:
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_pool);
+		spin_unlock(&port->port_lock);
+		printk(KERN_ERR
+		"gs_read_complete: unexpected status error, status=%d\n",
+			req->status);
+		/* goto requeue; */
+		break;
+	}
+}
+
+/*
+* gs_write_complete
+*/
+/*
+ * gs_write_complete
+ *
+ * IN-endpoint completion: recycles the request to the write pool and,
+ * unless this was a terminating ZLP with nothing pending, keeps
+ * draining the write buffer via gs_send().
+ */
+static void gs_write_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port	*port;
+	unsigned long flags;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_write_complete: NULL device pointer\n");
+		return;
+	}
+	/* only dereference dev after the NULL check; the original read
+	 * dev->dev_port[0] first, defeating the check entirely */
+	port = dev->dev_port[0];
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list, &port->write_pool);
+
+	switch (req->status) {
+	default:
+		/* presumably a transient fault */
+		printk(KERN_ERR "%s: unexpected status %d\n",
+				__func__, req->status);
+		/* FALL THROUGH */
+	case 0:
+		/* normal completion: stop if a ZLP just went out and the
+		 * write buffer is empty, otherwise keep sending */
+		if ((req->length == 0) &&
+			(gs_buf_data_avail(port->port_write_buf) == 0)) {
+			break;
+		}
+		if (dev->dev_config)
+			gs_send(dev);
+
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		printk(KERN_DEBUG "%s: shutdown\n", __func__);
+		break;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Send Notification to host if Status changes */
+/* Build and queue a CDC SERIAL_STATE notification reflecting port->msr. */
+static void send_notify_data(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct usb_cdc_notification *event;
+	struct gs_port *port;
+	unsigned int status, rc;
+	__le16 *state;
+	u16 bits = 0;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "send_notify_data: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR"send_notify_data:port is NULL\n");
+		return;
+	}
+
+	/* drop a still-pending notification before queuing a fresh one */
+	if (test_bit(0, &dev->notify_queued))
+		usb_ept_cancel_xfer(dev->dev_notify_ep,
+		dev->notify_req);
+
+	status = port->msr;
+	event = req->buf;
+	event->bmRequestType  = 0xA1;
+	event->bNotificationType  = USB_CDC_NOTIFY_SERIAL_STATE;
+	event->wValue  = __constant_cpu_to_le16(0);
+	event->wIndex  = __constant_cpu_to_le16(dev->interface_num);
+	event->wLength  = __constant_cpu_to_le16(2);
+
+	/* serial-state bitmap: DCD (bit 0), DSR (bit 1), RI (bit 3) */
+	if (status & MSR_CD)
+		bits |= 1;
+	if (status & MSR_DSR)
+		bits |= 1 << 1;
+	if (status & MSR_RI)
+		bits |= 1 << 3;
+	state = req->buf + sizeof(*event);
+	state[0] = __constant_cpu_to_le16(bits);
+
+	set_bit(0, &dev->notify_queued);
+	rc = usb_ept_queue_xfer(ep, req);
+	if (rc) {
+		clear_bit(0, &dev->notify_queued);
+		printk(KERN_ERR
+		"send_notify_data: cannot queue status request,ret = %d\n",
+			       rc);
+	}
+}
+
+/* Free request if -ESHUTDOWN */
+/* Notify-endpoint completion; resends if msr changed while queued. */
+static void gs_status_complete(struct usb_endpoint *ep,
+				struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+
+	if (!dev) {
+		printk(KERN_ERR"gs_status_complete : NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+	if (!port) {
+		printk(KERN_ERR "gs_status_complete: NULL port pointer\n");
+		return;
+	}
+
+	clear_bit(0, &dev->notify_queued);
+
+	if (req->status == 0) {
+		gs_debug("%s:port->msr=%x,dev=%p,ep=%p,req=%p", __func__,
+			port->msr, dev, dev->dev_notify_ep, dev->notify_req);
+		/* catch a state change that raced with the request that
+		 * just completed (e.g. via tiocmset) */
+		if (port->prev_msr != port->msr) {
+			send_notify_data(dev->dev_notify_ep, dev->notify_req);
+			port->prev_msr = port->msr;
+		}
+	} else if (req->status == -ESHUTDOWN) {
+		/* disconnect */
+		gs_debug("gs_status_complete: shutdown\n");
+		gs_free_req(ep, req);
+	} else {
+		printk(KERN_ERR
+	       "gs_status_complete: unexpected status error, status=%d\n",
+		       req->status);
+	}
+}
+
+/* Function Driver */
+/*
+ * gs_bind
+ *
+ * Called on module load.  Allocates and initializes the device
+ * structure and a control request.
+ */
+static void gs_bind(void *_ctxt)
+{
+	struct usb_endpoint *ep;
+	struct gs_dev *dev = _ctxt;
+	struct usb_function *func = dev->func;
+	int ret;
+
+	if (func == NULL) {
+		pr_err("%s: NULL function pointer\n", __func__);
+		return;
+	}
+
+	ret = gs_alloc_ports(dev, GFP_KERNEL);
+	if (ret != 0) {
+		pr_err("%s: cannot allocate ports\n", __func__);
+		gs_unbind(_ctxt);
+		return;
+	}
+
+	ret = usb_msm_get_next_ifc_number(func);
+	dev->gs_ifc_desc.bInterfaceNumber = ret;
+	dev->gs_ifc_desc.iInterface = 0;
+
+	/*Configuring IN Endpoint*/
+	ep = dev->dev_in_ep = usb_alloc_endpoint(USB_DIR_IN);
+	if (!ep) {
+		pr_err("%s: in endpoint allocation failed\n", __func__);
+		return;
+	}
+	dev->gs_hs_bulkin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	dev->gs_fs_bulkin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: bulk_in_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/*Configuring OUT endpoint*/
+	ep = dev->dev_out_ep = usb_alloc_endpoint(USB_DIR_OUT);
+	if (!ep) {
+		pr_err("out endpoint allocation failed\n");
+		return;
+	}
+	dev->gs_hs_bulkout_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	dev->gs_fs_bulkout_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	pr_debug("%s: bulk_out_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/*Configuring NOTIFY endpoint*/
+	ep = dev->dev_notify_ep = usb_alloc_endpoint(USB_DIR_IN);
+	if (!ep) {
+		pr_err("notify endpoint allocation failed\n");
+		return;
+	}
+	dev->gs_hs_notifyin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	dev->gs_fs_notifyin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: notify_in_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/* The single status buffer is shared by the notify request.  The
+	 * original loop kmalloc'd it GS_DEFAULT_INT_REQ times, leaking
+	 * every allocation but the last; allocate it exactly once. */
+	dev->statusreqbuf.buf = kmalloc(64, GFP_KERNEL);
+	if (!dev->statusreqbuf.buf)
+		return;
+
+	dev->bound = 1;
+}
+
+/*
+ * gs_unbind
+ *
+ * Called on module unload.  Frees the control request and device
+ * structure.
+ */
+static void /* __init_or_exit */ gs_unbind(void *_ctxt)
+{
+	struct gs_dev *dev = _ctxt;
+	struct usb_endpoint *eps[3];
+	int i;
+
+	if (!dev) {
+		pr_err("%s: error: null device\n", __func__);
+		return;
+	}
+	if (!dev->bound)
+		return;
+
+	kfree(dev->statusreqbuf.buf);
+
+	/* flush, disable and release each endpoint we allocated in bind */
+	eps[0] = dev->dev_in_ep;
+	eps[1] = dev->dev_out_ep;
+	eps[2] = dev->dev_notify_ep;
+	for (i = 0; i < 3; i++) {
+		if (!eps[i])
+			continue;
+		usb_ept_fifo_flush(eps[i]);
+		usb_ept_enable(eps[i], 0);
+		usb_free_endpoint(eps[i]);
+	}
+
+	gs_free_ports(dev);
+	dev->bound = 0;
+	pr_debug("%s: %s %s\n", __func__, GS_LONG_NAME, GS_VERSION_STR);
+}
+
+/*
+ * gser_complete_set_line_coding
+ *
+ * EP0 OUT completion for SET_LINE_CODING: stores the host-supplied
+ * line coding on port 0 and ACKs with a zero-length EP0 IN packet.
+ */
+static void gser_complete_set_line_coding(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	struct usb_cdc_line_coding *value;
+	struct usb_request *in_req;
+
+	/* validate before dereferencing: the original read dev->dev_port[0]
+	 * (and dev->func in the error path) before checking dev for NULL */
+	if (!dev || !dev->dev_port[0]) {
+		printk(KERN_ERR "%s(): dev or dev_port is null\n", __func__);
+		if (dev && dev->func)
+			usb_ept_set_halt(dev->func->ep0_in);
+		return;
+	}
+	port = dev->dev_port[0];
+
+	if (req->actual != sizeof(port->port_line_coding)) {
+		printk(KERN_ERR "%s(): received wrong data\n", __func__);
+		usb_ept_set_halt(dev->func->ep0_in);
+		return;
+	}
+
+	/* Use Host assigned port_line setting */
+	value = req->buf;
+	port->port_line_coding = *value;
+
+	/* Send ACK (zero-length packet) on EP0 IN */
+	in_req = dev->func->ep0_in_req;
+	in_req->length = 0;
+	in_req->complete = 0;
+	usb_ept_queue_xfer(dev->func->ep0_in, in_req);
+}
+
+/*
+ * gs_setup
+ *
+ * EP0 class-request handler for the ACM control interface.  Returns the
+ * number of bytes to transfer for the data stage, 0 when dev/port are
+ * unexpectedly NULL, or a negative errno for unsupported requests.
+ */
+static int gs_setup(struct usb_ctrlrequest *ctrl,
+		void *buf, int len, void *_ctxt)
+{
+	int ret = -EOPNOTSUPP;
+	struct gs_dev *dev = _ctxt;
+	struct gs_port *port;/* ACM only has one port */
+	u16 wIndex = le16_to_cpu(ctrl->wIndex);
+	u16 wValue = le16_to_cpu(ctrl->wValue);
+	u16 wLength = le16_to_cpu(ctrl->wLength);
+
+	if (dev == NULL) {
+		printk(KERN_ERR"gs_setup:device pointer NULL\n");
+		return 0;
+	}
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR"gs_setup: port pointer is NULL\n");
+		return 0;
+	}
+	switch (ctrl->bRequest) {
+
+	case USB_CDC_REQ_SET_LINE_CODING:
+		/* data stage arrives on EP0 OUT; the coding is applied in
+		 * gser_complete_set_line_coding() when it completes */
+		if (port) {
+			struct usb_request *req = dev->func->ep0_out_req;
+			ret = min(wLength,
+				(u16) sizeof(struct usb_cdc_line_coding));
+			if (ret != sizeof(struct usb_cdc_line_coding))
+				ret = -EOPNOTSUPP;
+			else {
+				req->device = dev;
+				req->complete = gser_complete_set_line_coding;
+				}
+		} else
+			ret = -ENODEV;
+		break;
+
+	case USB_CDC_REQ_GET_LINE_CODING:
+		/* copy current coding into the EP0 buffer under the lock */
+		port = dev->dev_port[0];/* ACM only has one port */
+		ret = min(wLength, (u16) sizeof(struct usb_cdc_line_coding));
+		if (port) {
+			spin_lock(&port->port_lock);
+			memcpy(buf, &port->port_line_coding, ret);
+			spin_unlock(&port->port_lock);
+		}
+		break;
+	case USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* mirror host DTR/RTS into port->mcr */
+		port = dev->dev_port[0];/* ACM only has one port */
+		if (wValue & USB_CDC_SET_CONTROL_LINE_STATE_DTR) {
+			port->mcr |= MCR_DTR;
+		} else	{
+			port->mcr &= ~MCR_DTR;
+		}
+		if (wValue & USB_CDC_SET_CONTROL_LINE_STATE_RTS)
+			port->mcr |= MCR_RTS;
+		else
+			port->mcr &= ~MCR_RTS;
+
+		/* remembered as the wIndex of later SERIAL_STATE notifies */
+		dev->interface_num = wIndex;
+		ret = 0;
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Bus disconnect: block further TTY I/O and kick any sleepers. */
+static void gs_disconnect(void *_ctxt)
+{
+	struct gs_dev *dev = _ctxt;
+	struct gs_port *port = dev->dev_port[0];
+	unsigned long flags;
+
+	/* tell the TTY glue not to do I/O here any more */
+	spin_lock_irqsave(&port->port_lock, flags);
+	dev->dev_config = 0;
+	if (port->port_open_count > 0 || port->port_in_use) {
+		struct tty_struct *tty = port->port_tty;
+
+		wake_up_interruptible(&port->port_write_wait);
+		if (tty) {
+			wake_up_interruptible(&tty->read_wait);
+			wake_up_interruptible(&tty->write_wait);
+			tty_hangup(tty);
+		}
+	}
+	port->mcr = 0;
+	port->msr = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/*
+ * gs_configure
+ *
+ * Configures the device by enabling device specific
+ * optimizations, setting up the endpoints, allocating
+ * read and write requests and queuing read requests.
+ *
+ * The device lock must be held when calling this function.
+ */
+static void gs_configure(int config, void *_ctxt)
+{
+	int i, ret = 0;
+	unsigned MaxPacketSize;
+	struct gs_dev *dev = _ctxt;
+	struct usb_endpoint *ep;
+	struct usb_request *req;
+	struct gs_port *port;
+	struct list_head *rhead;
+	struct list_head *whead;
+	unsigned started = 0;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_configure: NULL device pointer\n");
+		return;
+	}
+	if (!dev->bound)
+		return;
+
+	/* check port before touching it; the original took the list-head
+	 * addresses first, making its NULL check ineffective */
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR "gs_configure:port is NULL\n");
+		return;
+	}
+	rhead = &port->read_pool;
+	whead = &port->write_pool;
+
+	/* config == 0 means deconfigure */
+	if (!config) {
+		gs_debug("gs_configure: Deconfigure\n");
+		dev->configured = SERIAL_UNCONFIGURED;
+		gs_reset_config(dev);
+		return;
+	}
+	dev->dev_config = config;
+
+	if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL ||
+	    (dev->dev_notify_ep == NULL)) {
+		printk(KERN_ERR "gs_configure : cannot find endpoints\n");
+		ret = -ENODEV;
+		goto reset_config;
+	}
+
+	/* pick the descriptor set matching the negotiated bus speed */
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->dev_in_ep, &dev->gs_hs_bulkin_desc);
+		usb_configure_endpoint(dev->dev_out_ep,
+					&dev->gs_hs_bulkout_desc);
+		usb_configure_endpoint(dev->dev_notify_ep,
+					&dev->gs_hs_notifyin_desc);
+	} else {
+		usb_configure_endpoint(dev->dev_in_ep, &dev->gs_fs_bulkin_desc);
+		usb_configure_endpoint(dev->dev_out_ep,
+					&dev->gs_fs_bulkout_desc);
+		usb_configure_endpoint(dev->dev_notify_ep,
+					&dev->gs_fs_notifyin_desc);
+	}
+	usb_ept_enable(dev->dev_in_ep, 1);
+	usb_ept_enable(dev->dev_out_ep, 1);
+	usb_ept_enable(dev->dev_notify_ep, 1);
+
+	gs_debug("gs_configure: endpoint sizes and buffers\n");
+	/* allocate and queue read requests */
+	ep = dev->dev_out_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < read_q_size; i++) {
+		req = gs_alloc_req(ep, MaxPacketSize);
+		if (req) {
+			req->device = (void *)dev;
+			req->length = MaxPacketSize;
+			req->complete = gs_read_complete;
+			list_add_tail(&req->list, rhead);
+			gs_debug("gs_configure: queuing read request(%d)\n", i);
+		} else {
+			printk(KERN_ERR
+			"gs_configure: cannot allocate read request(%d)\n", i);
+			goto reset_config;
+		}
+	}
+
+	/* allocate write requests, and put on free list */
+	ep = dev->dev_in_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < write_q_size; i++) {
+		req = gs_alloc_req(ep, MaxPacketSize);
+		if (req) {
+			req->device = (void *)dev;
+			req->length = MaxPacketSize;
+			req->complete = gs_write_complete;
+			list_add_tail(&req->list, whead);
+		} else {
+			printk(KERN_ERR
+			"gs_configure: cannot allocate write request(%d)\n", i);
+			goto reset_config;
+		}
+	}
+
+	/* single notify request backed by the shared status buffer; the
+	 * original loop re-allocated it GS_DEFAULT_INT_REQ times (leaking
+	 * all but the last) and silently ignored allocation failure */
+	ep = dev->dev_notify_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	dev->notify_req = req = gs_alloc_req(ep, 0);
+	if (!req) {
+		printk(KERN_ERR "gs_configure: cannot allocate notify request\n");
+		goto reset_config;
+	}
+	req->device = (void *)dev;
+	req->buf = dev->statusreqbuf.buf;
+	req->length = MaxPacketSize;
+	req->complete = gs_status_complete;
+
+	if (port->port_open_count) {
+		unsigned long flags;
+		spin_lock_irqsave(&port->port_lock, flags);
+		started = gs_start_rx(dev);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		if (started)
+			tty_wakeup(port->port_tty);
+	}
+
+	dev->configured = SERIAL_CONFIGURED;
+
+	return;
+
+reset_config:
+	printk(KERN_ERR "gs_configure(end): error, calling gs_reset_config\n");
+	gs_reset_config(dev);
+	return;
+}
+/*
+ * gs_start_rx
+ *
+ * Queue every request from port->read_pool on the OUT endpoint.
+ * Returns the number of requests successfully queued.
+ *
+ * Callers (gs_rx_push, gs_configure) hold port->port_lock; it is
+ * dropped around usb_ept_queue_xfer() and re-acquired afterwards.
+ */
+static unsigned gs_start_rx(struct gs_dev *dev)
+{
+	struct gs_port *port = dev->dev_port[0];
+	struct list_head *pool = &port->read_pool;
+	unsigned ret = 0;
+	struct usb_endpoint *ep = dev->dev_out_ep;
+	unsigned started = 0;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		struct tty_struct	*tty;
+		tty = port->port_tty;
+		if (!tty) {
+			/* no reader attached; leave requests in the pool */
+			printk(KERN_ERR "%s: tty is null\n", __func__);
+			break;
+		}
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		/* drop the lock across the controller call */
+		spin_unlock(&port->port_lock);
+		ret = usb_ept_queue_xfer(ep, req);
+		spin_lock(&port->port_lock);
+		if (ret) {
+			/* queueing failed; return the request and stop */
+			list_add(&req->list, pool);
+			break;
+		}
+		started++;
+
+	}
+	return started;
+}
+/*
+ * gs_reset_config
+ *
+ * Mark the device as not configured, disable all endpoints,
+ * which forces completion of pending I/O and frees queued
+ * requests, and free the remaining write requests on the
+ * free list.
+ *
+ * The device lock must be held when calling this function.
+ */
+static void gs_reset_config(struct gs_dev *dev)
+{
+	struct gs_port *port;
+	struct usb_request *req;
+	unsigned long flags;
+
+	if (!dev) {
+		printk(KERN_ERR "gs_reset_config: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+	/* force completion of outstanding transfers and drop the
+	 * controller-side request bookkeeping per endpoint */
+	if (dev->dev_out_ep)
+		usb_free_endpoint_all_req(dev->dev_out_ep);
+	if (dev->dev_in_ep)
+		usb_free_endpoint_all_req(dev->dev_in_ep);
+	if (dev->dev_notify_ep)
+		usb_free_endpoint_all_req(dev->dev_notify_ep);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	dev->dev_config = GS_NO_CONFIG_ID;
+
+	/* release every request still parked on our lists */
+	while (!list_empty(&port->write_pool)) {
+		req = list_first_entry(&port->write_pool,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_in_ep, req);
+	}
+
+	while (!list_empty(&port->read_pool)) {
+		req = list_first_entry(&port->read_pool,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_out_ep, req);
+	}
+
+	while (!list_empty(&port->read_queue)) {
+		req = list_first_entry(&port->read_queue,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_out_ep, req);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_alloc_req
+ *
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+static struct usb_request *gs_alloc_req(struct usb_endpoint *ep,
+					unsigned int len)
+{
+	/* tolerate a NULL endpoint so callers need not pre-check */
+	return ep ? usb_ept_alloc_req(ep, len) : NULL;
+}
+
+/*
+ * gs_free_req
+ *
+ * Free a usb_request and its buffer.
+ */
+static void gs_free_req(struct usb_endpoint *ep, struct usb_request *req)
+{
+	/* NULL endpoint or request is a no-op */
+	if (ep == NULL || req == NULL)
+		return;
+	usb_ept_free_req(ep, req);
+}
+
+/*
+ * gs_alloc_ports
+ *
+ * Allocate all ports and set the gs_dev struct to point to them.
+ * Return 0 if successful, or a negative error number.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags)
+{
+	int i;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return -EIO;
+
+	for (i = 0; i < GS_NUM_PORTS; i++) {
+		port = kzalloc(sizeof(struct gs_port), kmalloc_flags);
+		if (port == NULL)
+			goto cleanup;
+
+		INIT_WORK(&port->push_work, gs_rx_push);
+		INIT_LIST_HEAD(&port->read_pool);
+		INIT_LIST_HEAD(&port->read_queue);
+		INIT_LIST_HEAD(&port->write_pool);
+		port->msr = 0;
+		port->prev_msr = 0;
+		port->mcr = 0;
+		port->port_dev = dev;
+		port->port_num = i;
+		port->port_line_coding.dwDTERate =
+		    cpu_to_le32(GS_DEFAULT_DTE_RATE);
+		port->port_line_coding.bCharFormat = GS_DEFAULT_CHAR_FORMAT;
+		port->port_line_coding.bParityType = GS_DEFAULT_PARITY;
+		port->port_line_coding.bDataBits = GS_DEFAULT_DATA_BITS;
+		spin_lock_init(&port->port_lock);
+		mutex_init(&port->mutex_lock);
+		init_waitqueue_head(&port->port_write_wait);
+
+		dev->dev_port[i] = port;
+	}
+
+	return 0;
+
+cleanup:
+	/* the original returned -ENOMEM here and leaked every port
+	 * allocated before the failure (gs_bind's gs_unbind() bailout
+	 * returns early because dev->bound is still 0) */
+	while (--i >= 0) {
+		kfree(dev->dev_port[i]);
+		dev->dev_port[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/*
+ * gs_free_ports
+ *
+ * Free all closed ports.  Open ports are disconnected by
+ * freeing their write buffers, setting their device pointers
+ * and the pointers to them in the device to NULL.  These
+ * ports will be freed when closed.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static void gs_free_ports(struct gs_dev *dev)
+{
+	int i;
+	unsigned long flags;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return;
+
+	for (i = 0; i < GS_NUM_PORTS; i++) {
+		port = dev->dev_port[i];
+		if (port != NULL) {
+			/* detach from the device first so no new I/O can
+			 * reach this port through dev */
+			dev->dev_port[i] = NULL;
+
+			spin_lock_irqsave(&port->port_lock, flags);
+
+			if (port->port_write_buf != NULL) {
+				gs_buf_free(port->port_write_buf);
+				port->port_write_buf = NULL;
+			}
+
+			if (port->port_open_count > 0 || port->port_in_use) {
+				/* still open: orphan the port and wake any
+				 * sleepers; it is freed when closed */
+				port->port_dev = NULL;
+				wake_up_interruptible(&port->port_write_wait);
+				if (port->port_tty) {
+					wake_up_interruptible
+					    (&port->port_tty->read_wait);
+					wake_up_interruptible
+					    (&port->port_tty->write_wait);
+				}
+				spin_unlock_irqrestore(&port->port_lock, flags);
+			} else {
+				/* closed: safe to free immediately */
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				kfree(port);
+			}
+
+		}
+	}
+}
+
+/* Circular Buffer */
+
+/*
+ * gs_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
+{
+	struct gs_buf *cb;
+
+	if (!size)
+		return NULL;
+
+	cb = kmalloc(sizeof(*cb), kmalloc_flags);
+	if (!cb)
+		return NULL;
+
+	cb->buf_buf = kmalloc(size, kmalloc_flags);
+	if (!cb->buf_buf) {
+		kfree(cb);
+		return NULL;
+	}
+
+	/* empty buffer: both cursors at the start of the backing store */
+	cb->buf_size = size;
+	cb->buf_put = cb->buf_buf;
+	cb->buf_get = cb->buf_buf;
+
+	return cb;
+}
+
+/*
+ * gs_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+void gs_buf_free(struct gs_buf *gb)
+{
+	if (gb == NULL)
+		return;
+	kfree(gb->buf_buf);
+	kfree(gb);
+}
+
+/*
+ * gs_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+void gs_buf_clear(struct gs_buf *gb)
+{
+	if (gb == NULL)
+		return;
+	/* discarding everything == advancing the read cursor to write */
+	gb->buf_get = gb->buf_put;
+}
+
+/*
+ * gs_buf_data_avail
+ *
+ * Return the number of bytes of data available in the circular
+ * buffer.
+ */
+unsigned int gs_buf_data_avail(struct gs_buf *gb)
+{
+	if (gb == NULL)
+		return 0;
+	/* distance from read cursor to write cursor, modulo the size */
+	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
+}
+
+/*
+ * gs_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+unsigned int gs_buf_space_avail(struct gs_buf *gb)
+{
+	if (gb == NULL)
+		return 0;
+	/* one slot stays unused so a full buffer differs from empty */
+	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
+}
+
+/*
+ * gs_buf_put
+ *
+ * Copy data data from a user buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	/* clamp to free space (one slot is always left unused) */
+	len = gs_buf_space_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	/* bytes until the physical end of the backing store */
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		/* wraps: fill the tail, continue from the start */
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf + len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else		/* count == len */
+			gb->buf_put = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+ * gs_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+unsigned int gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	/* clamp to the amount of data actually buffered */
+	len = gs_buf_data_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	/* bytes until the physical end of the backing store */
+	len = gb->buf_buf + gb->buf_size - gb->buf_get;
+	if (count > len) {
+		/* wraps: drain the tail, continue from the start */
+		memcpy(buf, gb->buf_get, len);
+		memcpy(buf + len, gb->buf_buf, count - len);
+		gb->buf_get = gb->buf_buf + count - len;
+	} else {
+		memcpy(buf, gb->buf_get, count);
+		if (count < len)
+			gb->buf_get += count;
+		else		/* count == len */
+			gb->buf_get = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+* gs_tiocmget
+*/
+static int gs_tiocmget(struct tty_struct *tty, struct file *file)
+{
+	struct gs_dev *dev = gs_devices[tty->index];
+	struct gs_port *port;
+	unsigned int mctrl, mstat, bits;
+
+	if (!dev)
+		return -EIO;
+	port = dev->dev_port[0];
+	if (!port)
+		return -EIO;
+
+	/* snapshot modem control/status under the mutex */
+	mutex_lock(&port->mutex_lock);
+	mctrl = port->mcr;
+	mstat = port->msr;
+	mutex_unlock(&port->mutex_lock);
+
+	bits = 0;
+	if (mctrl & MCR_RTS)
+		bits |= TIOCM_RTS;
+	if (mctrl & MCR_DTR)
+		bits |= TIOCM_DTR;
+	if (mctrl & MCR_LOOP)
+		bits |= TIOCM_LOOP;
+	if (mstat & MSR_CD)
+		bits |= TIOCM_CD;
+	if (mstat & MSR_RI)
+		bits |= TIOCM_RI;
+	if (mstat & MSR_DSR)
+		bits |= TIOCM_DSR;
+	if (mstat & MSR_CTS)
+		bits |= TIOCM_CTS;
+
+	return bits;
+}
+
+/*
+* gs_tiocmset
+*/
+/*
+ * gs_tiocmset
+ *
+ * Update the DSR/RI/CD/CTS bits of port->msr from userspace and push a
+ * SERIAL_STATE notification to the host if the state changed.
+ */
+static int gs_tiocmset(struct tty_struct *tty, struct file *file,
+	unsigned int set, unsigned int clear)
+{
+	struct gs_port *port;
+	unsigned int msr;
+	struct gs_dev *dev = gs_devices[tty->index];
+
+	if (dev == NULL)
+		return -EIO;
+	port = dev->dev_port[0];
+
+	if (port == NULL)
+		return -EIO;
+
+	if (dev->configured != SERIAL_CONFIGURED)
+		return -EIO;
+
+	/* read-modify-write msr under the mutex; the original read it
+	 * unlocked and also wrote back an unmodified copy of port->mcr,
+	 * which could clobber a concurrent update */
+	mutex_lock(&port->mutex_lock);
+	msr = port->msr;
+
+	/* only the input-status bits are settable through this path */
+	set &= TIOCM_DSR | TIOCM_RI | TIOCM_CD | TIOCM_CTS;
+
+	if (set & TIOCM_DSR)
+		msr |= MSR_DSR;
+	if (set & TIOCM_RI)
+		msr |= MSR_RI;
+	if (set & TIOCM_CD)
+		msr |= MSR_CD;
+	if (set & TIOCM_CTS)
+		msr |= MSR_CTS;
+
+	clear &= TIOCM_DSR | TIOCM_RI | TIOCM_CD | TIOCM_CTS;
+
+	if (clear & TIOCM_RI)
+		msr &= ~MSR_RI;
+	if (clear & TIOCM_DSR)
+		msr &= ~MSR_DSR;
+	if (clear & TIOCM_CD)
+		msr &= ~MSR_CD;
+	if (clear & TIOCM_CTS)
+		msr &= ~MSR_CTS;
+
+	port->msr = msr;
+
+	if (port->prev_msr != port->msr) {
+		send_notify_data(dev->dev_notify_ep, dev->notify_req);
+		port->prev_msr = port->msr;
+	}
+	mutex_unlock(&port->mutex_lock);
+
+	return 0;
+}
diff --git a/drivers/usb/function/ums.c b/drivers/usb/function/ums.c
new file mode 100644
index 0000000..509387f
--- /dev/null
+++ b/drivers/usb/function/ums.c
@@ -0,0 +1,469 @@
+/* drivers/usb/function/ums.c
+ *
+ * Function Device for USB Mass Storage
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+#include <linux/wait.h>
+#include <linux/list.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb_usual.h>
+
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "usb_function.h"
+
+/* Compile-time debug switch: change "#if 1" to "#if 0" to route DBG()
+ * output to printk. */
+#if 1
+#define DBG(x...) do {} while (0)
+#else
+#define DBG(x...) printk(x)
+#endif
+
+/* largest single bulk transfer staged per request */
+#define TXN_MAX 4096
+
+/* UMS setup class requests */
+#define USB_BULK_GET_MAX_LUN_REQUEST   0xFE
+#define USB_BULK_RESET_REQUEST         0xFF
+
+/* number of rx and tx requests to allocate */
+#define RX_REQ_MAX 4
+#define TX_REQ_MAX 4
+
+/* FIXME - add ioctl() support for LUN count */
+int lun_count = 1;
+
+/* Per-function state; a single static instance (_context) exists. */
+struct ums_context
+{
+	int online;		/* nonzero once the function is usable */
+	int error;		/* latched on transfer failure; cleared on open */
+
+	atomic_t read_excl;	/* single-reader exclusion (see _lock) */
+	atomic_t write_excl;	/* single-writer exclusion */
+	atomic_t open_excl;	/* single-opener exclusion */
+	spinlock_t lock;	/* guards the request lists below */
+
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head tx_idle;	/* IN requests ready for use */
+	struct list_head rx_idle;	/* OUT requests ready for use */
+	struct list_head rx_done;	/* completed OUT requests with data */
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	/* the request we're currently reading from */
+	struct usb_request *read_req;
+	unsigned char *read_buf;
+};
+
+static struct ums_context _context;
+
+/* Try to take an exclusion flag; returns 0 on success, -1 if held. */
+static inline int _lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) != 1) {
+		atomic_dec(excl);
+		return -1;
+	}
+	return 0;
+}
+
+/* Release an exclusion previously taken with _lock(). */
+static inline void _unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct ums_context *ctxt, struct list_head *head, struct usb_request *req)
+{
+	unsigned long irqflags;
+
+	/* ctxt->lock protects all three request lists */
+	spin_lock_irqsave(&ctxt->lock, irqflags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&ctxt->lock, irqflags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct ums_context *ctxt, struct list_head *head)
+{
+	struct usb_request *req = 0;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ctxt->lock, irqflags);
+	if (!list_empty(head)) {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, irqflags);
+	/* NULL when the list was empty */
+	return req;
+}
+
+static void ums_complete_in(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct ums_context *ctxt = req->context;
+
+	DBG("ums_complete_in length: %d, actual: %d \n", req->length, req->actual);
+
+	if (req->status)
+		ctxt->error = 1;
+
+	/* the request is idle again regardless of outcome */
+	req_put(ctxt, &ctxt->tx_idle, req);
+	wake_up(&ctxt->write_wq);
+}
+
+static void ums_complete_out(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct ums_context *ctxt = req->context;
+
+	DBG("ums_complete_out length: %d, actual: %d \n", req->length, req->actual);
+
+	if (req->status == 0) {
+		/* data arrived: hand it to the reader */
+		req_put(ctxt, &ctxt->rx_done, req);
+	} else {
+		ctxt->error = 1;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+	wake_up(&ctxt->read_wq);
+}
+
+/*
+ * ums_read
+ *
+ * Block until the function is online, queue one OUT request sized to
+ * the caller's buffer, wait for it to complete and copy the payload to
+ * userspace.  Returns bytes read or a negative errno.
+ */
+static ssize_t ums_read(struct file *fp, char __user *buf,
+                            size_t count, loff_t *pos)
+{
+	struct ums_context *ctxt = &_context;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	DBG("ums_read(%d)\n", count);
+
+	if (_lock(&ctxt->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while (!(ctxt->online || ctxt->error)) {
+		DBG("ums_read: waiting for online state\n");
+		ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
+		if (ret < 0) {
+			_unlock(&ctxt->read_excl);
+			return ret;
+		}
+	}
+
+	if (ctxt->error) {
+		r = -EIO;
+		goto fail;
+	}
+
+	/* if we have idle read requests, get them queued */
+	if ((req = req_get(ctxt, &ctxt->rx_idle))) {
+		req->length = count;
+		ret = usb_ept_queue_xfer(ctxt->out, req);
+		if (ret < 0) {
+			DBG("ums_read: failed to queue req %p (%d)\n", req, ret);
+			r = -EIO;
+			ctxt->error = 1;
+			req_put(ctxt, &ctxt->rx_idle, req);
+			goto fail;
+		} else {
+			DBG("rx %p queue\n", req);
+		}
+	} else {
+		DBG("req_get failed!\n");
+		/* the original fell through with r still equal to count,
+		 * falsely reporting a full read that never happened */
+		r = -EIO;
+		goto fail;
+	}
+
+	/* wait for a request to complete */
+	req = 0;
+	ret = wait_event_interruptible(ctxt->read_wq,
+				       ((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
+
+	if (req != 0) {
+		ctxt->read_req = req;
+		ctxt->read_buf = req->buf;
+		DBG("rx %p %d\n", req, req->actual);
+
+		/* never hand back more than the caller asked for */
+		xfer = req->actual;
+		if (xfer > count)
+			xfer = count;
+		r = xfer;
+
+		if (xfer > 0) {
+			DBG("copy_to_user %d bytes\n", xfer);
+			if (copy_to_user(buf, ctxt->read_buf, xfer))
+				r = -EFAULT;
+		}
+		/* recycle the request for the next read */
+		req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+		ctxt->read_req = 0;
+	} else {
+		/* interrupted (or error flagged) before data arrived */
+		r = ret;
+	}
+
+fail:
+	_unlock(&ctxt->read_excl);
+	DBG("ums_read returning %d\n", r);
+	return r;
+}
+
+/*
+ * ums_write
+ *
+ * Copy userspace data into idle IN requests (TXN_MAX bytes at a time)
+ * and queue them until everything is sent.  A count of 0 still sends
+ * one zero-length packet.  Returns bytes accepted or a negative errno.
+ */
+static ssize_t ums_write(struct file *fp, const char __user *buf,
+                             size_t count, loff_t *pos)
+{
+	struct ums_context *ctxt = &_context;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	DBG("ums_write(%d)\n", count);
+
+	if (_lock(&ctxt->write_excl))
+		return -EBUSY;
+
+	/* count is a size_t, so the original "while (count >= 0)" was
+	 * always true; loop explicitly and exit through the breaks */
+	for (;;) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->write_wq,
+					       ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			xfer = count > TXN_MAX ? TXN_MAX : count;
+			if (copy_from_user(req->buf, buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ept_queue_xfer(ctxt->in, req);
+			if (ret < 0) {
+				DBG("ums_write: xfer error %d\n", ret);
+				ctxt->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+			if (count == 0)
+				break;
+		}
+	}
+
+	/* return an unqueued request to the idle pool on error exit */
+	if (req)
+		req_put(ctxt, &ctxt->tx_idle, req);
+
+	_unlock(&ctxt->write_excl);
+	DBG("ums_write returning %d\n", r);
+	return r;
+}
+
+/* Open /dev/android_ums.  Single-opener: a second open fails with
+ * -EBUSY until the first is released.  A fresh open starts with the
+ * error latch cleared.
+ */
+static int ums_open(struct inode *ip, struct file *fp)
+{
+	struct ums_context *ums = &_context;
+
+	if (_lock(&ums->open_excl))
+		return -EBUSY;
+
+	ums->error = 0;
+	return 0;
+}
+
+/* Release /dev/android_ums: drop the single-opener exclusion. */
+static int ums_release(struct inode *ip, struct file *fp)
+{
+	struct ums_context *ums = &_context;
+
+	_unlock(&ums->open_excl);
+	return 0;
+}
+
+/* character device entry points for /dev/android_ums */
+static struct file_operations ums_fops = {
+	.owner =   THIS_MODULE,
+	.read =    ums_read,
+	.write =   ums_write,
+	.open =    ums_open,
+	.release = ums_release,
+};
+	
+/* misc device registered from ums_bind(); shows up as /dev/android_ums */
+static struct miscdevice ums_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_ums",
+	.fops = &ums_fops,
+};
+
+/* Called when the framework has allocated our endpoints but before the
+ * bus is active.  ept[0] is bulk-OUT, ept[1] bulk-IN (matching
+ * usb_func_ums.ifc_ept_type).  Allocates the rx/tx request pools and
+ * registers the misc device; on a partial allocation failure all
+ * requests allocated so far are released again (the original leaked
+ * them — see the old XXX comment).
+ */
+static void ums_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct ums_context *ctxt = _ctxt;
+	struct usb_request *req;
+	int n;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	DBG("ums_bind() %p, %p\n", ctxt->out, ctxt->in);
+
+	for(n = 0; n < RX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 4096);
+		if(req == 0) goto fail;
+		req->context = ctxt;
+		req->complete = ums_complete_out;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	for(n = 0; n < TX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 4096);
+		if(req == 0) goto fail;
+		req->context = ctxt;
+		req->complete = ums_complete_in;
+		req_put(ctxt, &ctxt->tx_idle, req);
+	}
+
+	printk(KERN_INFO "ums_bind() allocated %d rx and %d tx requests\n",
+	       RX_REQ_MAX, TX_REQ_MAX);
+
+	misc_register(&ums_device);
+	return;
+
+fail:
+	printk(KERN_ERR "ums_bind() could not allocate requests\n");
+
+	/* release whatever we did manage to allocate */
+	while((req = req_get(ctxt, &ctxt->rx_idle)))
+		usb_ept_free_req(ctxt->out, req);
+	while((req = req_get(ctxt, &ctxt->tx_idle)))
+		usb_ept_free_req(ctxt->in, req);
+}
+
+/* Handle mass-storage class control requests on ep0.
+ * Returns the number of bytes to send back for IN requests, 0 for a
+ * handled OUT request, or -1 if the request is unsupported/malformed.
+ */
+static int ums_setup(struct usb_ctrlrequest* req, void* buf, int len, void *_ctxt)
+{
+	if ((req->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		if (req->bRequest == USB_BULK_GET_MAX_LUN_REQUEST) {
+			/* Get Max LUN is a device-to-host (IN) request */
+			if ((req->bRequestType & USB_DIR_IN) != USB_DIR_IN
+					|| req->wValue != 0 || req->wIndex != 0)
+			 	return -1;
+
+			((u8*)buf)[0] = lun_count - 1;
+			printk("USB_BULK_GET_MAX_LUN_REQUEST returning %d\n", lun_count - 1);
+			return 1;
+		} else if (req->bRequest == USB_BULK_RESET_REQUEST) {
+			/* Bulk-Only Mass Storage Reset is host-to-device
+			 * (OUT).  The original test masked bRequestType
+			 * with USB_DIR_OUT (which is 0) and compared
+			 * against USB_DIR_IN (0x80) — always true — so
+			 * every reset was rejected.  Require the direction
+			 * bit to be clear instead.
+			 */
+			if ((req->bRequestType & USB_DIR_IN)
+					|| req->wValue != 0 || req->wIndex != 0)
+			 	return -1;
+
+			/* FIXME - I'm not sure what to do here */
+			printk("USB_BULK_RESET_REQUEST\n");
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/* Notification from the controller that the host configured (or
+ * unconfigured) the device.  Currently invoked from interrupt context,
+ * so only flag updates, list manipulation and wakeups happen here.
+ */
+static void ums_configure(int configured, void *_ctxt)
+{
+	struct ums_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	DBG("ums_configure() %d\n", configured);
+
+	if (!configured) {
+		/* going offline: latch the error so readers/writers bail */
+		ctxt->online = 0;
+		ctxt->error = 1;
+	} else {
+		ctxt->online = 1;
+
+		/* recycle a stale request a reader was holding */
+		ctxt->read_buf = 0;
+		if (ctxt->read_req) {
+			req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+			ctxt->read_req = 0;
+		}
+
+		/* retire completed rx requests left from the prior session */
+		while ((req = req_get(ctxt, &ctxt->rx_done)))
+			req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	/* readers may be blocked waiting for the online transition */
+	wake_up(&ctxt->read_wq);
+}
+
+/* function driver descriptor: mass-storage class, SCSI over bulk-only,
+ * one bulk-OUT plus one bulk-IN endpoint (order matters to ums_bind)
+ */
+static struct usb_function usb_func_ums = {
+	.bind = ums_bind,
+	.configure = ums_configure,
+	.setup = ums_setup,
+
+	.name = "ums",
+	.context = &_context,
+
+	.ifc_class = USB_CLASS_MASS_STORAGE,
+	.ifc_subclass = US_SC_SCSI,
+	.ifc_protocol = US_PR_BULK,
+
+	.ifc_name = "ums",
+	
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+/* Module init: prepare the shared context, then hand the function
+ * driver to the USB function framework.  Returns the framework's
+ * registration result.
+ */
+static int __init ums_init(void)
+{
+	struct ums_context *ctxt = &_context;
+
+	DBG("ums_init()\n");
+
+	spin_lock_init(&ctxt->lock);
+
+	INIT_LIST_HEAD(&ctxt->rx_idle);
+	INIT_LIST_HEAD(&ctxt->rx_done);
+	INIT_LIST_HEAD(&ctxt->tx_idle);
+
+	init_waitqueue_head(&ctxt->read_wq);
+	init_waitqueue_head(&ctxt->write_wq);
+
+	/* exclusion flags: zero means "free" */
+	atomic_set(&ctxt->open_excl, 0);
+	atomic_set(&ctxt->read_excl, 0);
+	atomic_set(&ctxt->write_excl, 0);
+
+	return usb_function_register(&usb_func_ums);
+}
+
+module_init(ums_init);
diff --git a/drivers/usb/function/usb_function.h b/drivers/usb/function/usb_function.h
new file mode 100644
index 0000000..35eb257
--- /dev/null
+++ b/drivers/usb/function/usb_function.h
@@ -0,0 +1,187 @@
+/* drivers/usb/function/usb_function.h
+ *
+ * USB Function Device Interface
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DRIVERS_USB_FUNCTION_USB_FUNCTION_H_
+#define _DRIVERS_USB_FUNCTION_USB_FUNCTION_H_
+
+#include <linux/list.h>
+#include <linux/usb/ch9.h>
+
+#define EPT_BULK_IN   1
+#define EPT_BULK_OUT  2
+#define EPT_INT_IN  3
+
+#define USB_CONFIG_ATT_SELFPOWER_POS	(6)	/* self powered */
+#define USB_CONFIG_ATT_WAKEUP_POS	(5)	/* can wakeup */
+
+/* Controller-side endpoint state.  Owned by the MSM HS USB controller
+ * driver; function drivers treat this as opaque and use the usb_ept_*
+ * accessors below.
+ */
+struct usb_endpoint {
+	struct usb_info *ui;
+	struct msm_request *req; /* head of pending requests */
+	struct msm_request *last;
+	unsigned flags;
+
+	/* bit number (0-31) in various status registers
+	** as well as the index into the usb_info's array
+	** of all endpoints
+	*/
+	unsigned char bit;
+	unsigned char num;
+
+	unsigned short max_pkt;
+
+	unsigned ept_halted;	/* nonzero while the endpoint is halted */
+
+	/* pointers to DMA transfer list area */
+	/* these are allocated from the usb_info dma space */
+	struct ept_queue_head *head;
+	struct usb_endpoint_descriptor *ep_descriptor;
+	unsigned int alloced;	/* nonzero once claimed by a function driver */
+};
+
+/* One bulk/interrupt transfer.  Allocate with usb_ept_alloc_req(),
+ * submit with usb_ept_queue_xfer(); complete() fires when the transfer
+ * finishes and may inspect status/actual.
+ */
+struct usb_request {
+	void *buf;          /* pointer to associated data buffer */
+	unsigned length;    /* requested transfer length */
+	int status;         /* status upon completion */
+	unsigned actual;    /* actual bytes transferred */
+
+	/* completion callback; see usb_ept_queue_xfer() comment below */
+	void (*complete)(struct usb_endpoint *ep, struct usb_request *req);
+	void *context;
+
+	void *device;
+
+	struct list_head list;	/* for function-driver request pools */
+};
+
+/* A pluggable USB function driver.  Fill in the callbacks and the
+ * interface/endpoint description, then hand it to
+ * usb_function_register().
+ */
+struct usb_function {
+	/* bind() is called once when the function has had its endpoints
+	** allocated, but before the bus is active.
+	**
+	** might be a good place to allocate some usb_request objects
+	**
+	** NOTE(review): implementations in ums.c/zero.c use the signature
+	** (struct usb_endpoint **ept, void *ctxt); presumably the core
+	** casts before calling — confirm against the controller driver.
+	*/
+	void (*bind)(void *);
+
+	/* unbind() is called when the function is being removed.
+	** it is illegal to call any usb_ept_* hooks at this point
+	** and all endpoints must be released.
+	*/
+	void (*unbind)(void *);
+
+	/* configure() is called when the usb client has been configured
+	** by the host and again when the device is unconfigured (or
+	** when the client is detached)
+	**
+	** currently called from interrupt context.
+	*/
+	void (*configure)(int configured, void *);
+	void (*disconnect)(void *);
+
+	/* setup() is called to allow functions to handle class and vendor
+	** setup requests.  If the request is unsupported or can not be handled,
+	** setup() should return -1.
+	** For OUT requests, buf will point to a buffer to data received in the
+	** request's data phase, and len will contain the length of the data.
+	** setup() should return 0 after handling an OUT request successfully.
+	** for IN requests, buf will contain a pointer to a buffer for setup()
+	** to write data to, and len will be the maximum size of the data to
+	** be written back to the host.
+	** After successfully handling an IN request, setup() should return
+	** the number of bytes written to buf that should be sent in the
+	** response to the host.
+	*/
+	int (*setup)(struct usb_ctrlrequest *req, void *buf,
+			int len, void *);
+
+	/* optional SET_INTERFACE / GET_INTERFACE handlers */
+	int (*set_interface)(int ifc_num, int alt_set, void *_ctxt);
+	int (*get_interface)(int ifc_num, void *ctxt);
+	/* driver name */
+	const char *name;
+	void *context;	/* passed back as the void* arg of every callback */
+
+	/* interface class/subclass/protocol for descriptor */
+	unsigned char ifc_class;
+	unsigned char ifc_subclass;
+	unsigned char ifc_protocol;
+
+	/* name string for descriptor */
+	const char *ifc_name;
+
+	/* number of needed endpoints and their types */
+	unsigned char ifc_ept_count;
+	unsigned char ifc_ept_type[8];
+
+	/* if the endpoint is disabled, its interface will not be
+	** included in the configuration descriptor
+	*/
+	unsigned char   disabled;
+
+	/* optional full-/high-speed descriptor overrides */
+	struct usb_descriptor_header **fs_descriptors;
+	struct usb_descriptor_header **hs_descriptors;
+
+	/* control-endpoint requests/endpoints owned by the core */
+	struct usb_request *ep0_out_req, *ep0_in_req;
+	struct usb_endpoint *ep0_out, *ep0_in;
+};
+
+/* function driver registration */
+int usb_function_register(struct usb_function *driver);
+int usb_function_unregister(struct usb_function *driver);
+
+int usb_msm_get_speed(void);
+void usb_configure_endpoint(struct usb_endpoint *ep,
+			struct usb_endpoint_descriptor *ep_desc);
+int usb_remote_wakeup(void);
+/* To allocate endpoint from function driver*/
+struct usb_endpoint *usb_alloc_endpoint(unsigned direction);
+int usb_free_endpoint(struct usb_endpoint *ept);
+/* To enable endpoint from function driver*/
+void usb_ept_enable(struct usb_endpoint *ept, int yes);
+int usb_msm_get_next_ifc_number(struct usb_function *);
+int usb_msm_get_next_strdesc_id(char *);
+void usb_msm_enable_iad(void);
+
+void usb_function_enable(const char *function, int enable);
+
+/* Allocate a USB request.
+** Must be called from a context that can sleep.
+** If bufsize is nonzero, req->buf will be allocated for
+** you and free'd when the request is free'd.  Otherwise
+** it is your responsibility to provide.
+*/
+struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept, unsigned bufsize);
+void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *req);
+
+/* safely callable from any context
+** returns 0 if successfully queued and sets req->status = -EBUSY
+** req->status will change to a different value upon completion
+** (0 for success, -EIO, -ENODEV, etc for error)
+*/
+int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *req);
+int usb_ept_flush(struct usb_endpoint *ept);
+int usb_ept_get_max_packet(struct usb_endpoint *ept);
+int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req);
+void usb_ept_fifo_flush(struct usb_endpoint *ept);
+int usb_ept_set_halt(struct usb_endpoint *ept);
+int usb_ept_clear_halt(struct usb_endpoint *ept);
+struct device *usb_get_device(void);
+struct usb_endpoint *usb_ept_find(struct usb_endpoint **ept, int type);
+struct usb_function *usb_ept_get_function(struct usb_endpoint *ept);
+int usb_ept_is_stalled(struct usb_endpoint *ept);
+void usb_request_set_buffer(struct usb_request *req, void *buf, dma_addr_t dma);
+void usb_free_endpoint_all_req(struct usb_endpoint *ep);
+void usb_remove_function_driver(struct usb_function *func);
+/* (duplicate usb_remote_wakeup() prototype removed — declared above) */
+#endif
diff --git a/drivers/usb/function/zero.c b/drivers/usb/function/zero.c
new file mode 100644
index 0000000..449bcbf
--- /dev/null
+++ b/drivers/usb/function/zero.c
@@ -0,0 +1,120 @@
+/* driver/usb/function/zero.c
+ *
+ * Zero Function Device - A Trivial Data Source
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+/* per-function state: the bulk IN endpoint and the two requests that
+ * are kept in flight to stream zeros to the host
+ */
+struct zero_context
+{
+	struct usb_endpoint *in;
+	struct usb_request *req0;
+	struct usb_request *req1;
+};
+
+/* single static instance; this driver supports one interface */
+static struct zero_context _context;
+
+/* Grab our single bulk IN endpoint and allocate the two 4K requests
+ * used to stream zeros.  The original memset the buffers without
+ * checking the allocations; bail out (and free a partial allocation)
+ * if either one fails.
+ */
+static void zero_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	ctxt->in = ept[0];
+	printk(KERN_INFO "zero_bind() %p\n", ctxt->in);
+
+	ctxt->req0 = usb_ept_alloc_req(ctxt->in, 4096);
+	ctxt->req1 = usb_ept_alloc_req(ctxt->in, 4096);
+	if (!ctxt->req0 || !ctxt->req1) {
+		printk(KERN_ERR "zero_bind() could not allocate requests\n");
+		if (ctxt->req0) {
+			usb_ept_free_req(ctxt->in, ctxt->req0);
+			ctxt->req0 = 0;
+		}
+		if (ctxt->req1) {
+			usb_ept_free_req(ctxt->in, ctxt->req1);
+			ctxt->req1 = 0;
+		}
+		return;
+	}
+
+	memset(ctxt->req0->buf, 0, 4096);
+	memset(ctxt->req1->buf, 0, 4096);
+}
+
+/* Release the requests and drop the endpoint reference when the
+ * function is removed.  (The log message said "null_unbind()" — a
+ * copy-paste from null.c — corrected here.)
+ */
+static void zero_unbind(void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	printk(KERN_INFO "zero_unbind()\n");
+	if (ctxt->req0) {
+		usb_ept_free_req(ctxt->in, ctxt->req0);
+		ctxt->req0 = 0;
+	}
+	if (ctxt->req1) {
+		usb_ept_free_req(ctxt->in, ctxt->req1);
+		ctxt->req1 = 0;
+	}
+	ctxt->in = 0;
+}
+
+static void zero_queue_in(struct zero_context *ctxt, struct usb_request *req);
+
+/* Completion callback for the IN endpoint: requeue the request so the
+ * stream of zeros continues.  (Removed the unused "data" local.)
+ */
+static void zero_in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct zero_context *ctxt = req->context;
+
+	/* -ENODEV means the device is gone; anything else, keep feeding */
+	if (req->status != -ENODEV)
+		zero_queue_in(ctxt, req);
+}
+
+/* (Re)arm one 4K buffer of zeros on the bulk IN endpoint. */
+static void zero_queue_in(struct zero_context *ctxt, struct usb_request *req)
+{
+	req->context = ctxt;
+	req->complete = zero_in_complete;
+	req->length = 4096;
+	usb_ept_queue_xfer(ctxt->in, req);
+}
+
+/* Configuration notification from the core: kick off the zero stream
+ * when the host configures us; on unconfigure there is nothing to do
+ * because all pending requests will be canceled for us.
+ */
+static void zero_configure(int configured, void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+
+	printk(KERN_INFO "zero_configure() %d\n", configured);
+
+	if (!configured)
+		return;
+
+	zero_queue_in(ctxt, ctxt->req0);
+	zero_queue_in(ctxt, ctxt->req1);
+}
+
+/* function driver descriptor: vendor-specific interface with a single
+ * bulk IN endpoint that sources an endless stream of zeros
+ */
+static struct usb_function usb_func_zero = {
+	.bind = zero_bind,
+	.unbind = zero_unbind,
+	.configure = zero_configure,
+
+	.name = "zero",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xfe,
+	.ifc_protocol = 0x02,
+
+	.ifc_name = "zero",
+
+	.ifc_ept_count = 1,
+	.ifc_ept_type = { EPT_BULK_IN },
+};
+
+/* Module init: register the zero function with the framework and
+ * propagate its result (the original discarded the error and always
+ * returned 0, matching neither ums_init nor module-init convention).
+ */
+static int __init zero_init(void)
+{
+	printk(KERN_INFO "zero_init()\n");
+	return usb_function_register(&usb_func_zero);
+}
+
+module_init(zero_init);