Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
new file mode 100644
index 0000000..3b24f9f
--- /dev/null
+++ b/drivers/usb/gadget/Kconfig
@@ -0,0 +1,389 @@
+#
+# USB Gadget support on a system involves
+#    (a) a peripheral controller, and
+#    (b) the gadget driver using it.
+#
+# NOTE:  Gadget support ** DOES NOT ** depend on host-side CONFIG_USB !!
+#
+#  - Host systems (like PCs) need CONFIG_USB (with "A" jacks).
+#  - Peripherals (like PDAs) need CONFIG_USB_GADGET (with "B" jacks).
+#  - Some systems have both kinds of controller.
+#
+# With help from a special transceiver and a "Mini-AB" jack, systems with
+# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
+#
+menu "USB Gadget Support"
+
+config USB_GADGET
+	tristate "Support for USB Gadgets"
+	help
+	   USB is a master/slave protocol, organized with one master
+	   host (such as a PC) controlling up to 127 peripheral devices.
+	   The USB hardware is asymmetric, which makes it easier to set up:
+	   you can't connect a "to-the-host" connector to a peripheral.
+
+	   Linux can run in the host, or in the peripheral.  In both cases
+	   you need a low level bus controller driver, and some software
+	   talking to it.  Peripheral controllers are often discrete silicon,
+	   or are integrated with the CPU in a microcontroller.  The more
+	   familiar host side controllers have names like "EHCI", "OHCI",
+	   or "UHCI", and are usually integrated into southbridges on PC
+	   motherboards.
+
+	   Enable this configuration option if you want to run Linux inside
+	   a USB peripheral device.  Configure one hardware driver for your
+	   peripheral/device side bus controller, and a "gadget driver" for
+	   your peripheral protocol.  (If you use modular gadget drivers,
+	   you may configure more than one.)
+
+	   If in doubt, say "N" and don't enable these drivers; most people
+	   don't have this kind of hardware (except maybe inside Linux PDAs).
+
+	   For more information, see <http://www.linux-usb.org/gadget> and
+	   the kernel DocBook documentation for this API.
+
+config USB_GADGET_DEBUG_FILES
+	boolean "Debugging information files"
+	depends on USB_GADGET && PROC_FS
+	help
+	   Some of the drivers in the "gadget" framework can expose
+	   debugging information in files such as /proc/driver/udc
+	   (for a peripheral controller).  The information in these
+	   files may help when you're troubleshooting or bringing up a
+	   driver on a new board.   Enable these files by choosing "Y"
+	   here.  If in doubt, or to conserve kernel memory, say "N".
+
+#
+# USB Peripheral Controller Support
+#
+choice
+	prompt "USB Peripheral Controller"
+	depends on USB_GADGET
+	help
+	   A USB device uses a controller to talk to its host.
+	   Systems should have only one such upstream link.
+	   Many controller drivers are platform-specific; these
+	   often need board-specific hooks.
+
+config USB_GADGET_NET2280
+	boolean "NetChip 2280"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   NetChip 2280 is a PCI based USB peripheral controller which
+	   supports both full and high speed USB 2.0 data transfers.  
+	   
+	   It has six configurable endpoints, as well as endpoint zero
+	   (for control transfers) and several endpoints with dedicated
+	   functions.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "net2280" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_NET2280
+	tristate
+	depends on USB_GADGET_NET2280
+	default USB_GADGET
+
+config USB_GADGET_PXA2XX
+	boolean "PXA 25x or IXP 4xx"
+	depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
+	help
+	   Intel's PXA 25x series XScale ARM-5TE processors include
+	   an integrated full speed USB 1.1 device controller.  The
+	   controller in the IXP 4xx series is register-compatible.
+
+	   It has fifteen fixed-function endpoints, as well as endpoint
+	   zero (for control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "pxa2xx_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_PXA2XX
+	tristate
+	depends on USB_GADGET_PXA2XX
+	default USB_GADGET
+
+# if there's only one gadget driver, using only two bulk endpoints,
+# don't waste memory for the other endpoints
+config USB_PXA2XX_SMALL
+	depends on USB_GADGET_PXA2XX
+	bool
+	default n if USB_ETH_RNDIS
+	default y if USB_ZERO
+	default y if USB_ETH
+	default y if USB_G_SERIAL
+
+config USB_GADGET_GOKU
+	boolean "Toshiba TC86C001 'Goku-S'"
+	depends on PCI
+	help
+	   The Toshiba TC86C001 is a PCI device which includes controllers
+	   for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
+	   
+	   The device controller has three configurable (bulk or interrupt)
+	   endpoints, plus endpoint zero (for control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "goku_udc" and to force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_GOKU
+	tristate
+	depends on USB_GADGET_GOKU
+	default USB_GADGET
+
+
+config USB_GADGET_LH7A40X
+	boolean "LH7A40X"
+	depends on ARCH_LH7A40X
+	help
+	   This driver provides a USB Device Controller driver for LH7A40x.
+
+config USB_LH7A40X
+	tristate
+	depends on USB_GADGET_LH7A40X
+	default USB_GADGET
+
+
+config USB_GADGET_OMAP
+	boolean "OMAP USB Device Controller"
+	depends on ARCH_OMAP
+	select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
+	help
+	   Many Texas Instruments OMAP processors have flexible full
+	   speed USB device controllers, with support for up to 30
+	   endpoints (plus endpoint zero).  This driver supports the
+	   controller in the OMAP 1611, and should work with controllers
+	   in other OMAP processors too, given minor tweaks.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "omap_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_OMAP
+	tristate
+	depends on USB_GADGET_OMAP
+	default USB_GADGET
+
+config USB_OTG
+	boolean "OTG Support"
+	depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD
+	help
+	   The most notable feature of USB OTG is support for a
+	   "Dual-Role" device, which can act as either a device
+	   or a host.  The initial role choice can be changed
+	   later, when two dual-role devices talk to each other.
+
+	   Select this only if your OMAP board has a Mini-AB connector.
+
+
+config USB_GADGET_DUMMY_HCD
+	boolean "Dummy HCD (DEVELOPMENT)"
+	depends on USB && EXPERIMENTAL
+	select USB_GADGET_DUALSPEED
+	help
+	  This host controller driver emulates USB, looping all data transfer
+	  requests back to a USB "gadget driver" in the same host.  The host
+	  side is the master; the gadget side is the slave.  Gadget drivers
+	  can be high, full, or low speed; and they have access to endpoints
+	  like those from NET2280, PXA2xx, or SA1100 hardware.
+	  
+	  This may help in some stages of creating a driver to embed in a
+	  Linux device, since it lets you debug several parts of the gadget
+	  driver without its hardware or drivers being involved.
+	  
+	  Since such a gadget side driver needs to interoperate with a host
+	  side Linux-USB device driver, this may help to debug both sides
+	  of a USB protocol stack.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "dummy_hcd" and force all
+	  gadget drivers to also be dynamically linked.
+
+config USB_DUMMY_HCD
+	tristate
+	depends on USB_GADGET_DUMMY_HCD
+	default USB_GADGET
+
+# NOTE:  Please keep dummy_hcd LAST so that "real hardware" appears
+# first and will be selected by default.
+
+endchoice
+
+config USB_GADGET_DUALSPEED
+	bool
+	depends on USB_GADGET
+	default n
+	help
+	  Means that gadget drivers should include extra descriptors
+	  and code to handle dual-speed controllers.
+
+#
+# USB Gadget Drivers
+#
+choice
+	tristate "USB Gadget Drivers"
+	depends on USB_GADGET
+	default USB_ETH
+	help
+	  A Linux "Gadget Driver" talks to the USB Peripheral Controller
+	  driver through the abstract "gadget" API.  Some other operating
+	  systems call these "client" drivers, of which "class drivers"
+	  are a subset (implementing a USB device class specification).
+	  A gadget driver implements one or more USB functions using
+	  the peripheral hardware.
+
+	  Gadget drivers are hardware-neutral, or "platform independent",
+	  except that they sometimes must understand quirks or limitations
+	  of the particular controllers they work with.  For example, when
+	  a controller doesn't support alternate configurations or provide
+	  enough of the right types of endpoints, the gadget driver might
+	  not be able to work with that controller, or might need to implement
+	  a less common variant of a device class protocol.
+
+# this first set of drivers all depend on bulk-capable hardware.
+
+config USB_ZERO
+	tristate "Gadget Zero (DEVELOPMENT)"
+	depends on EXPERIMENTAL
+	help
+	  Gadget Zero is a two-configuration device.  It either sinks and
+	  sources bulk data, or it loops back a configurable number of
+	  transfers.  It also implements control requests, for "chapter 9"
+	  conformance.  The driver needs only two bulk-capable endpoints, so
+	  it can work on top of most device-side usb controllers.  It's
+	  useful for testing, and is also a working example showing how
+	  USB "gadget drivers" can be written.
+
+	  Make this be the first driver you try using on top of any new
+	  USB peripheral controller driver.  Then you can use host-side
+	  test software, like the "usbtest" driver, to put your hardware
+	  and its driver through a basic set of functional tests.
+
+	  Gadget Zero also works with the host-side "usb-skeleton" driver,
+	  and with many kinds of host-side test software.  You may need
+	  to tweak product and vendor IDs before host software knows about
+	  this device, and arrange to select an appropriate configuration.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_zero".
+
+config USB_ZERO_HNPTEST
+	boolean "HNP Test Device"
+	depends on USB_ZERO && USB_OTG
+	help
+	  You can configure this device to enumerate using the device
+	  identifiers of the USB-OTG test device.  That means that when
+	  this gadget connects to another OTG device, with this one using
+	  the "B-Peripheral" role, that device will use HNP to let this
+	  one serve as the USB host instead (in the "B-Host" role).
+
+config USB_ETH
+	tristate "Ethernet Gadget (with CDC Ethernet support)"
+	depends on NET
+	help
+	  This driver implements Ethernet style communication, in either
+	  of two ways:
+	  
+	   - The "Communication Device Class" (CDC) Ethernet Control Model.
+	     That protocol is often avoided with pure Ethernet adapters, in
+	     favor of simpler vendor-specific hardware, but is widely
+	     supported by firmware for smart network devices.
+
+	   - On hardware that can't implement that protocol, a simple CDC subset
+	     is used, placing fewer demands on USB.
+
+	  RNDIS support is a third option, more demanding than that subset.
+
+	  Within the USB device, this gadget driver exposes a network device
+	  "usbX", where X depends on what other networking devices you have.
+	  Treat it like a two-node Ethernet link:  host, and gadget.
+
+	  The Linux-USB host-side "usbnet" driver interoperates with this
+	  driver, so that deep I/O queues can be supported.  On 2.4 kernels,
+	  use "CDCEther" instead, if you're using the CDC option. That CDC
+	  mode should also interoperate with standard CDC Ethernet class
+	  drivers on other host operating systems.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_ether".
+
+config USB_ETH_RNDIS
+	bool "RNDIS support (EXPERIMENTAL)"
+	depends on USB_ETH && EXPERIMENTAL
+	default y
+	help
+	   Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
+	   and Microsoft provides redistributable binary RNDIS drivers for
+	   older versions of Windows.
+
+	   If you say "y" here, the Ethernet gadget driver will try to provide
+	   a second device configuration, supporting RNDIS to talk to such
+	   Microsoft USB hosts.
+	   
+	   To make MS-Windows work with this, use Documentation/usb/linux.inf
+	   as the "driver info file".  For versions of MS-Windows older than
+	   XP, you'll need to download drivers from Microsoft's website; a URL
+	   is given in comments found in that info file.
+
+config USB_GADGETFS
+	tristate "Gadget Filesystem (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This driver provides a filesystem based API that lets user mode
+	  programs implement a single-configuration USB device, including
+	  endpoint I/O and control requests that don't relate to enumeration.
+	  All endpoints, transfer speeds, and transfer types supported by
+	  the hardware are available, through read() and write() calls.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "gadgetfs".
+
+config USB_FILE_STORAGE
+	tristate "File-backed Storage Gadget"
+	help
+	  The File-backed Storage Gadget acts as a USB Mass Storage
+	  disk drive.  As its storage repository it can use a regular
+	  file or a block device (in much the same way as the "loop"
+	  device driver), specified as a module parameter.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_file_storage".
+
+config USB_FILE_STORAGE_TEST
+	bool "File-backed Storage Gadget testing version"
+	depends on USB_FILE_STORAGE
+	default n
+	help
+	  Say "y" to generate the larger testing version of the
+	  File-backed Storage Gadget, useful for probing the
+	  behavior of USB Mass Storage hosts.  Not needed for
+	  normal operation.
+
+config USB_G_SERIAL
+	tristate "Serial Gadget (with CDC ACM support)"
+	help
+	  The Serial Gadget talks to the Linux-USB generic serial driver.
+	  This driver supports a CDC-ACM module option, which can be used
+	  to interoperate with MS-Windows hosts or with the Linux-USB
+	  "cdc-acm" driver.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_serial".
+
+	  For more information, see Documentation/usb/gadget_serial.txt
+	  which includes instructions and a "driver info file" needed to
+	  make MS-Windows work with this driver.
+
+
+# put drivers that need isochronous transfer support (for audio
+# or video class gadget drivers), or specific hardware, here.
+
+# - none yet
+
+endchoice
+
+endmenu
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
new file mode 100644
index 0000000..d5fd04d
--- /dev/null
+++ b/drivers/usb/gadget/Makefile
@@ -0,0 +1,30 @@
+#
+# USB peripheral controller drivers
+#
+obj-$(CONFIG_USB_DUMMY_HCD)	+= dummy_hcd.o
+obj-$(CONFIG_USB_NET2280)	+= net2280.o
+obj-$(CONFIG_USB_PXA2XX)	+= pxa2xx_udc.o
+obj-$(CONFIG_USB_GOKU)		+= goku_udc.o
+obj-$(CONFIG_USB_OMAP)		+= omap_udc.o
+obj-$(CONFIG_USB_LH7A40X)	+= lh7a40x_udc.o
+
+#
+# USB gadget drivers
+#
+g_zero-objs			:= zero.o usbstring.o config.o epautoconf.o
+g_ether-objs			:= ether.o usbstring.o config.o epautoconf.o
+g_serial-objs			:= serial.o usbstring.o config.o epautoconf.o
+gadgetfs-objs			:= inode.o
+g_file_storage-objs		:= file_storage.o usbstring.o config.o \
+					epautoconf.o
+
+ifeq ($(CONFIG_USB_ETH_RNDIS),y)
+	g_ether-objs		+= rndis.o
+endif
+ 
+obj-$(CONFIG_USB_ZERO)		+= g_zero.o
+obj-$(CONFIG_USB_ETH)		+= g_ether.o
+obj-$(CONFIG_USB_GADGETFS)	+= gadgetfs.o
+obj-$(CONFIG_USB_FILE_STORAGE)	+= g_file_storage.o
+obj-$(CONFIG_USB_G_SERIAL)	+= g_serial.o
+
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
new file mode 100644
index 0000000..83b4866
--- /dev/null
+++ b/drivers/usb/gadget/config.c
@@ -0,0 +1,117 @@
+/*
+ * usb/gadget/config.c -- simplify building config descriptors
+ *
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+
+/**
+ * usb_descriptor_fillbuf - fill buffer with descriptors
+ * @buf: Buffer to be filled
+ * @buflen: Size of buf
+ * @src: Array of descriptor pointers, terminated by null pointer.
+ *
+ * Copies descriptors into the buffer, returning the length or a
+ * negative error code if they can't all be copied.  Useful when
+ * assembling descriptors for an associated set of interfaces used
+ * as part of configuring a composite device; or in other cases where
+ * sets of descriptors need to be marshaled.
+ */
+int
+usb_descriptor_fillbuf(void *buf, unsigned buflen,
+		const struct usb_descriptor_header **src)
+{
+	u8	*dest = buf;
+
+	if (!src)
+		return -EINVAL;
+
+	/* fill buffer from src[] until null descriptor ptr */
+	for (; 0 != *src; src++) {
+		unsigned		len = (*src)->bLength;
+
+		if (len > buflen)
+			return -EINVAL;
+		memcpy(dest, *src, len);
+		buflen -= len;
+		dest += len;
+	}
+	return dest - (u8 *)buf;
+}
+
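+/*
+ * For example (a sketch; "my_intf", "my_source_ep", and "my_sink_ep" stand
+ * in for a gadget driver's own descriptors, and buf/buflen for its response
+ * buffer):
+ *
+ *	static const struct usb_descriptor_header *my_function [] = {
+ *		(struct usb_descriptor_header *) &my_intf,
+ *		(struct usb_descriptor_header *) &my_source_ep,
+ *		(struct usb_descriptor_header *) &my_sink_ep,
+ *		NULL,
+ *	};
+ *
+ *	len = usb_descriptor_fillbuf (buf, buflen, my_function);
+ */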
+
+/**
+ * usb_gadget_config_buf - builds a complete configuration descriptor
+ * @config: Header for the descriptor, including characteristics such
+ *	as power requirements and number of interfaces.
+ * @desc: Null-terminated vector of pointers to the descriptors (interface,
+ *	endpoint, etc) defining all functions in this device configuration.
+ * @buf: Buffer for the resulting configuration descriptor.
+ * @length: Length of buffer.  If this is not big enough to hold the
+ *	entire configuration descriptor, an error code will be returned.
+ *
+ * This copies descriptors into the response buffer, building a descriptor
+ * for that configuration.  It returns the buffer length or a negative
+ * status code.  The config.wTotalLength field is set to match the length
+ * of the result, but other descriptor fields (including power usage and
+ * interface count) must be set by the caller.
+ *
+ * Gadget drivers could use this when constructing a config descriptor
+ * in response to USB_REQ_GET_DESCRIPTOR.  They will need to patch the
+ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
+ */
+int usb_gadget_config_buf(
+	const struct usb_config_descriptor	*config,
+	void					*buf,
+	unsigned				length,
+	const struct usb_descriptor_header	**desc
+)
+{
+	struct usb_config_descriptor		*cp = buf;
+	int					len;
+
+	/* config descriptor first */
+	if (length < USB_DT_CONFIG_SIZE || !desc)
+		return -EINVAL;
+	*cp = *config; 
+
+	/* then interface/endpoint/class/vendor/... */
+	len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
+			length - USB_DT_CONFIG_SIZE, desc);
+	if (len < 0)
+		return len;
+	len += USB_DT_CONFIG_SIZE;
+	if (len > 0xffff)
+		return -EINVAL;
+
+	/* patch up the config descriptor */
+	cp->bLength = USB_DT_CONFIG_SIZE;
+	cp->bDescriptorType = USB_DT_CONFIG;
+	cp->wTotalLength = cpu_to_le16(len);
+	cp->bmAttributes |= USB_CONFIG_ATT_ONE;
+	return len;
+}
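+
+/*
+ * For example (a sketch; "config", "my_function", "req", and "type" stand
+ * in for a gadget driver's own variables while it answers
+ * USB_REQ_GET_DESCRIPTOR):
+ *
+ *	len = usb_gadget_config_buf (&config, req->buf, buflen, my_function);
+ *	if (len < 0)
+ *		return len;
+ *	if (type == USB_DT_OTHER_SPEED_CONFIG)
+ *		((struct usb_config_descriptor *) req->buf)->bDescriptorType
+ *				= USB_DT_OTHER_SPEED_CONFIG;
+ */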
+
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
new file mode 100644
index 0000000..8ef8a9c
--- /dev/null
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -0,0 +1,1793 @@
+/*
+ * dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+/*
+ * This exposes a device side "USB gadget" API, driven by requests to a
+ * Linux-USB host controller driver.  USB traffic is simulated; there's
+ * no need for USB hardware.  Use this with two other drivers:
+ *
+ *  - Gadget driver, responding to requests (slave);
+ *  - Host-side device driver, as already familiar in Linux.
+ *
+ * Having this all in one kernel can help some stages of development,
+ * bypassing some hardware (and driver) issues.  UML could help too.
+ */
+
+#define DEBUG
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+
+#include <linux/usb.h>
+#include <linux/usb_gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+
+#include "../core/hcd.h"
+
+
+#define DRIVER_DESC	"USB Host+Gadget Emulator"
+#define DRIVER_VERSION	"17 Dec 2004"
+
+static const char	driver_name [] = "dummy_hcd";
+static const char	driver_desc [] = "USB Host+Gadget Emulator";
+
+static const char	gadget_name [] = "dummy_udc";
+
+MODULE_DESCRIPTION (DRIVER_DESC);
+MODULE_AUTHOR ("David Brownell");
+MODULE_LICENSE ("GPL");
+
+/*-------------------------------------------------------------------------*/
+
+/* gadget side driver data structures */
+struct dummy_ep {
+	struct list_head		queue;
+	unsigned long			last_io;	/* jiffies timestamp */
+	struct usb_gadget		*gadget;
+	const struct usb_endpoint_descriptor *desc;
+	struct usb_ep			ep;
+	unsigned			halted : 1;
+	unsigned			already_seen : 1;
+	unsigned			setup_stage : 1;
+};
+
+struct dummy_request {
+	struct list_head		queue;		/* ep's requests */
+	struct usb_request		req;
+};
+
+static inline struct dummy_ep *usb_ep_to_dummy_ep (struct usb_ep *_ep)
+{
+	return container_of (_ep, struct dummy_ep, ep);
+}
+
+static inline struct dummy_request *usb_request_to_dummy_request
+		(struct usb_request *_req)
+{
+	return container_of (_req, struct dummy_request, req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Every device has ep0 for control requests, plus up to 30 more endpoints,
+ * in one of two types:
+ *
+ *   - Configurable:  direction (in/out), type (bulk, iso, etc), and endpoint
+ *     number can be changed.  Names like "ep-a" are used for this type.
+ *
+ *   - Fixed Function:  in other cases.  some characteristics may be mutable;
+ *     that'd be hardware-specific.  Names like "ep12out-bulk" are used.
+ *
+ * Gadget drivers are responsible for not setting up conflicting endpoint
+ * configurations, illegal or unsupported packet lengths, and so on.
+ */
+
+static const char ep0name [] = "ep0";
+
+static const char *const ep_name [] = {
+	ep0name,				/* everyone has ep0 */
+
+	/* act like a net2280: high speed, six configurable endpoints */
+	"ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
+
+	/* or like pxa250: fifteen fixed function endpoints */
+	"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
+	"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
+	"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
+		"ep15in-int",
+
+	/* or like sa1100: two fixed function endpoints */
+	"ep1out-bulk", "ep2in-bulk",
+};
+#define DUMMY_ENDPOINTS	(sizeof(ep_name)/sizeof(char *))
+
+#define FIFO_SIZE		64
+
+struct urbp {
+	struct urb		*urb;
+	struct list_head	urbp_list;
+};
+
+struct dummy {
+	spinlock_t			lock;
+
+	/*
+	 * SLAVE/GADGET side support
+	 */
+	struct dummy_ep			ep [DUMMY_ENDPOINTS];
+	int				address;
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct dummy_request		fifo_req;
+	u8				fifo_buf [FIFO_SIZE];
+	u16				devstatus;
+
+	/*
+	 * MASTER/HOST side support
+	 */
+	struct timer_list		timer;
+	u32				port_status;
+	unsigned			resuming:1;
+	unsigned long			re_timeout;
+
+	struct usb_device		*udev;
+	struct list_head		urbp_list;
+};
+
+static inline struct dummy *hcd_to_dummy (struct usb_hcd *hcd)
+{
+	return (struct dummy *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *dummy_to_hcd (struct dummy *dum)
+{
+	return container_of((void *) dum, struct usb_hcd, hcd_priv);
+}
+
+static inline struct device *dummy_dev (struct dummy *dum)
+{
+	return dummy_to_hcd(dum)->self.controller;
+}
+
+static inline struct dummy *ep_to_dummy (struct dummy_ep *ep)
+{
+	return container_of (ep->gadget, struct dummy, gadget);
+}
+
+static inline struct dummy *gadget_to_dummy (struct usb_gadget *gadget)
+{
+	return container_of (gadget, struct dummy, gadget);
+}
+
+static inline struct dummy *gadget_dev_to_dummy (struct device *dev)
+{
+	return container_of (dev, struct dummy, gadget.dev);
+}
+
+static struct dummy			*the_controller;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This "hardware" may look a bit odd in diagnostics since it's got both
+ * host and device sides; and it binds different drivers to each side.
+ */
+static struct platform_device		the_pdev;
+
+static struct device_driver dummy_driver = {
+	.name		= (char *) driver_name,
+	.bus		= &platform_bus_type,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* SLAVE/GADGET SIDE DRIVER
+ *
+ * This only tracks gadget state.  All the work is done when the host
+ * side tries some (emulated) i/o operation.  Real device controller
+ * drivers would do real i/o using dma, fifos, irqs, timers, etc.
+ */
+
+#define is_enabled(dum) \
+	(dum->port_status & USB_PORT_STAT_ENABLE)
+
+static int
+dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct dummy		*dum;
+	struct dummy_ep		*ep;
+	unsigned		max;
+	int			retval;
+
+	ep = usb_ep_to_dummy_ep (_ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dum = ep_to_dummy (ep);
+	if (!dum->driver || !is_enabled (dum))
+		return -ESHUTDOWN;
+	max = le16_to_cpu(desc->wMaxPacketSize) & 0x3ff;
+
+	/* drivers must not request bad settings, since lower levels
+	 * (hardware or its drivers) may not check.  some endpoints
+	 * can't do iso, many have maxpacket limitations, etc.
+	 *
+	 * since this "hardware" driver is here to help debugging, we
+	 * have some extra sanity checks.  (there could be more though,
+	 * especially for "ep9out" style fixed function ones.)
+	 */
+	retval = -EINVAL;
+	switch (desc->bmAttributes & 0x03) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (strstr (ep->ep.name, "-iso")
+				|| strstr (ep->ep.name, "-int")) {
+			goto done;
+		}
+		switch (dum->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max == 512)
+				break;
+			/* conserve return statements */
+		default:
+			switch (max) {
+			case 8: case 16: case 32: case 64:
+				/* we'll fake any legal size */
+				break;
+			default:
+		case USB_SPEED_LOW:
+				goto done;
+			}
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (strstr (ep->ep.name, "-iso")) /* bulk is ok */
+			goto done;
+		/* real hardware might not handle all packet sizes */
+		switch (dum->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+			/* save a return statement */
+		case USB_SPEED_FULL:
+			if (max <= 64)
+				break;
+			/* save a return statement */
+		default:
+			if (max <= 8)
+				break;
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (strstr (ep->ep.name, "-bulk")
+				|| strstr (ep->ep.name, "-int"))
+			goto done;
+		/* real hardware might not handle all packet sizes */
+		switch (dum->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+			/* save a return statement */
+		case USB_SPEED_FULL:
+			if (max <= 1023)
+				break;
+			/* save a return statement */
+		default:
+			goto done;
+		}
+		break;
+	default:
+		/* few chips support control except on ep0 */
+		goto done;
+	}
+
+	_ep->maxpacket = max;
+	ep->desc = desc;
+
+	dev_dbg (dummy_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d\n",
+		_ep->name,
+		desc->bEndpointAddress & 0x0f,
+		(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+		({ char *val;
+		 switch (desc->bmAttributes & 0x03) {
+		 case USB_ENDPOINT_XFER_BULK: val = "bulk"; break;
+		 case USB_ENDPOINT_XFER_ISOC: val = "iso"; break;
+		 case USB_ENDPOINT_XFER_INT: val = "intr"; break;
+		 default: val = "ctrl"; break;
+		 }; val; }),
+		max);
+
+	/* at this point real hardware should be NAKing transfers
+	 * to that endpoint, until a buffer is queued to it.
+	 */
+	retval = 0;
+done:
+	return retval;
+}
+
+/* called with spinlock held */
+static void nuke (struct dummy *dum, struct dummy_ep *ep)
+{
+	while (!list_empty (&ep->queue)) {
+		struct dummy_request	*req;
+
+		req = list_entry (ep->queue.next, struct dummy_request, queue);
+		list_del_init (&req->queue);
+		req->req.status = -ESHUTDOWN;
+
+		spin_unlock (&dum->lock);
+		req->req.complete (&ep->ep, &req->req);
+		spin_lock (&dum->lock);
+	}
+}
+
+static int dummy_disable (struct usb_ep *_ep)
+{
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+	unsigned long		flags;
+	int			retval;
+
+	ep = usb_ep_to_dummy_ep (_ep);
+	if (!_ep || !ep->desc || _ep->name == ep0name)
+		return -EINVAL;
+	dum = ep_to_dummy (ep);
+
+	spin_lock_irqsave (&dum->lock, flags);
+	ep->desc = NULL;
+	retval = 0;
+	nuke (dum, ep);
+	spin_unlock_irqrestore (&dum->lock, flags);
+
+	dev_dbg (dummy_dev(dum), "disabled %s\n", _ep->name);
+	return retval;
+}
+
+static struct usb_request *
+dummy_alloc_request (struct usb_ep *_ep, int mem_flags)
+{
+	struct dummy_ep		*ep;
+	struct dummy_request	*req;
+
+	if (!_ep)
+		return NULL;
+	ep = usb_ep_to_dummy_ep (_ep);
+
+	req = kmalloc (sizeof *req, mem_flags);
+	if (!req)
+		return NULL;
+	memset (req, 0, sizeof *req);
+	INIT_LIST_HEAD (&req->queue);
+	return &req->req;
+}
+
+static void
+dummy_free_request (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct dummy_ep		*ep;
+	struct dummy_request	*req;
+
+	ep = usb_ep_to_dummy_ep (_ep);
+	if (!ep || !_req || (!ep->desc && _ep->name != ep0name))
+		return;
+
+	req = usb_request_to_dummy_request (_req);
+	WARN_ON (!list_empty (&req->queue));
+	kfree (req);
+}
+
+static void *
+dummy_alloc_buffer (
+	struct usb_ep *_ep,
+	unsigned bytes,
+	dma_addr_t *dma,
+	int mem_flags
+) {
+	char			*retval;
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+
+	ep = usb_ep_to_dummy_ep (_ep);
+	dum = ep_to_dummy (ep);
+
+	if (!dum->driver)
+		return NULL;
+	retval = kmalloc (bytes, mem_flags);
+	*dma = (dma_addr_t) retval;
+	return retval;
+}
+
+static void
+dummy_free_buffer (
+	struct usb_ep *_ep,
+	void *buf,
+	dma_addr_t dma,
+	unsigned bytes
+) {
+	if (bytes)
+		kfree (buf);
+}
+
+static void
+fifo_complete (struct usb_ep *ep, struct usb_request *req)
+{
+}
+
+static int
+dummy_queue (struct usb_ep *_ep, struct usb_request *_req, int mem_flags)
+{
+	struct dummy_ep		*ep;
+	struct dummy_request	*req;
+	struct dummy		*dum;
+	unsigned long		flags;
+
+	req = usb_request_to_dummy_request (_req);
+	if (!_req || !list_empty (&req->queue) || !_req->complete)
+		return -EINVAL;
+
+	ep = usb_ep_to_dummy_ep (_ep);
+	if (!_ep || (!ep->desc && _ep->name != ep0name))
+		return -EINVAL;
+
+	dum = ep_to_dummy (ep);
+	if (!dum->driver || !is_enabled (dum))
+		return -ESHUTDOWN;
+
+#if 0
+	dev_dbg (dummy_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
+			ep, _req, _ep->name, _req->length, _req->buf);
+#endif
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+	spin_lock_irqsave (&dum->lock, flags);
+
+	/* implement an emulated single-request FIFO */
+	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+			list_empty (&dum->fifo_req.queue) &&
+			list_empty (&ep->queue) &&
+			_req->length <= FIFO_SIZE) {
+		req = &dum->fifo_req;
+		req->req = *_req;
+		req->req.buf = dum->fifo_buf;
+		memcpy (dum->fifo_buf, _req->buf, _req->length);
+		req->req.context = dum;
+		req->req.complete = fifo_complete;
+
+		spin_unlock (&dum->lock);
+		_req->actual = _req->length;
+		_req->status = 0;
+		_req->complete (_ep, _req);
+		spin_lock (&dum->lock);
+	}
+	list_add_tail (&req->queue, &ep->queue);
+	spin_unlock_irqrestore (&dum->lock, flags);
+
+	/* real hardware would likely enable transfers here, in case
+	 * it'd been left NAKing.
+	 */
+	return 0;
+}
+
+static int dummy_dequeue (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+	int			retval = -EINVAL;
+	unsigned long		flags;
+	struct dummy_request	*req = NULL;
+
+	if (!_ep || !_req)
+		return retval;
+	ep = usb_ep_to_dummy_ep (_ep);
+	dum = ep_to_dummy (ep);
+
+	if (!dum->driver)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave (&dum->lock, flags);
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req) {
+			list_del_init (&req->queue);
+			_req->status = -ECONNRESET;
+			retval = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore (&dum->lock, flags);
+
+	if (retval == 0) {
+		dev_dbg (dummy_dev(dum),
+				"dequeued req %p from %s, len %d buf %p\n",
+				req, _ep->name, _req->length, _req->buf);
+		_req->complete (_ep, _req);
+	}
+	return retval;
+}
+
+static int
+dummy_set_halt (struct usb_ep *_ep, int value)
+{
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+
+	if (!_ep)
+		return -EINVAL;
+	ep = usb_ep_to_dummy_ep (_ep);
+	dum = ep_to_dummy (ep);
+	if (!dum->driver)
+		return -ESHUTDOWN;
+	if (!value)
+		ep->halted = 0;
+	else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+			!list_empty (&ep->queue))
+		return -EAGAIN;
+	else
+		ep->halted = 1;
+	/* FIXME clear emulated data toggle too */
+	return 0;
+}
+
+static const struct usb_ep_ops dummy_ep_ops = {
+	.enable		= dummy_enable,
+	.disable	= dummy_disable,
+
+	.alloc_request	= dummy_alloc_request,
+	.free_request	= dummy_free_request,
+
+	.alloc_buffer	= dummy_alloc_buffer,
+	.free_buffer	= dummy_free_buffer,
+	/* map, unmap, ... eventually hook the "generic" dma calls */
+
+	.queue		= dummy_queue,
+	.dequeue	= dummy_dequeue,
+
+	.set_halt	= dummy_set_halt,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* there are both host and device side versions of this call ... */
+static int dummy_g_get_frame (struct usb_gadget *_gadget)
+{
+	struct timeval	tv;
+
+	do_gettimeofday (&tv);
+	return tv.tv_usec / 1000;
+}
+
+static int dummy_wakeup (struct usb_gadget *_gadget)
+{
+	struct dummy	*dum;
+
+	dum = gadget_to_dummy (_gadget);
+	if ((dum->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) == 0
+			|| !(dum->port_status & (1 << USB_PORT_FEAT_SUSPEND)))
+		return -EINVAL;
+
+	/* hub notices our request, issues downstream resume, etc */
+	dum->resuming = 1;
+	dum->port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+	return 0;
+}
+
+static int dummy_set_selfpowered (struct usb_gadget *_gadget, int value)
+{
+	struct dummy	*dum;
+
+	dum = gadget_to_dummy (_gadget);
+	if (value)
+		dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+	else
+		dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+	return 0;
+}
+
+static const struct usb_gadget_ops dummy_ops = {
+	.get_frame	= dummy_g_get_frame,
+	.wakeup		= dummy_wakeup,
+	.set_selfpowered = dummy_set_selfpowered,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* "function" sysfs attribute */
+static ssize_t
+show_function (struct device *dev, char *buf)
+{
+	struct dummy	*dum = gadget_dev_to_dummy (dev);
+
+	if (!dum->driver || !dum->driver->function)
+		return 0;
+	return scnprintf (buf, PAGE_SIZE, "%s\n", dum->driver->function);
+}
+DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Driver registration/unregistration.
+ *
+ * This is basically hardware-specific; there's usually only one real USB
+ * device (not host) controller since that's how USB devices are intended
+ * to work.  So most implementations of these api calls will rely on the
+ * fact that only one driver will ever bind to the hardware.  But curious
+ * hardware can be built with discrete components, so the gadget API doesn't
+ * require that assumption.
+ *
+ * For this emulator, it might be convenient to create a usb slave device
+ * for each driver that registers:  just add to a big root hub.
+ */
+
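+/*
+ * For example, a gadget driver binding here (or to real hardware) looks
+ * roughly like this sketch, where the "my_*" names are placeholders:
+ *
+ *	static struct usb_gadget_driver my_driver = {
+ *		.function	= "my gadget",
+ *		.speed		= USB_SPEED_HIGH,
+ *		.bind		= my_bind,
+ *		.unbind		= my_unbind,
+ *		.setup		= my_setup,
+ *		.disconnect	= my_disconnect,
+ *		.driver		= { .name = "my_gadget", },
+ *	};
+ *
+ *	retval = usb_gadget_register_driver (&my_driver);
+ */
+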
+static void
+dummy_udc_release (struct device *dev)
+{
+}
+
+static void
+dummy_pdev_release (struct device *dev)
+{
+}
+
+static int
+dummy_register_udc (struct dummy *dum)
+{
+	int		rc;
+
+	strcpy (dum->gadget.dev.bus_id, "udc");
+	dum->gadget.dev.parent = dummy_dev(dum);
+	dum->gadget.dev.release = dummy_udc_release;
+
+	rc = device_register (&dum->gadget.dev);
+	if (rc == 0)
+		device_create_file (&dum->gadget.dev, &dev_attr_function);
+	return rc;
+}
+
+static void
+dummy_unregister_udc (struct dummy *dum)
+{
+	device_remove_file (&dum->gadget.dev, &dev_attr_function);
+	device_unregister (&dum->gadget.dev);
+}
+
+int
+usb_gadget_register_driver (struct usb_gadget_driver *driver)
+{
+	struct dummy	*dum = the_controller;
+	int		retval, i;
+
+	if (!dum)
+		return -EINVAL;
+	if (dum->driver)
+		return -EBUSY;
+	if (!driver->bind || !driver->unbind || !driver->setup
+			|| driver->speed == USB_SPEED_UNKNOWN)
+		return -EINVAL;
+
+	/*
+	 * SLAVE side init ... the layer above hardware, which
+	 * can't enumerate without help from the driver we're binding.
+	 */
+	dum->gadget.name = gadget_name;
+	dum->gadget.ops = &dummy_ops;
+	dum->gadget.is_dualspeed = 1;
+
+	dum->devstatus = 0;
+	dum->resuming = 0;
+
+	INIT_LIST_HEAD (&dum->gadget.ep_list);
+	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+		struct dummy_ep	*ep = &dum->ep [i];
+
+		if (!ep_name [i])
+			break;
+		ep->ep.name = ep_name [i];
+		ep->ep.ops = &dummy_ep_ops;
+		list_add_tail (&ep->ep.ep_list, &dum->gadget.ep_list);
+		ep->halted = ep->already_seen = ep->setup_stage = 0;
+		ep->ep.maxpacket = ~0;
+		ep->last_io = jiffies;
+		ep->gadget = &dum->gadget;
+		ep->desc = NULL;
+		INIT_LIST_HEAD (&ep->queue);
+	}
+
+	dum->gadget.ep0 = &dum->ep [0].ep;
+	dum->ep [0].ep.maxpacket = 64;
+	list_del_init (&dum->ep [0].ep.ep_list);
+	INIT_LIST_HEAD(&dum->fifo_req.queue);
+
+	dum->driver = driver;
+	dum->gadget.dev.driver = &driver->driver;
+	dev_dbg (dummy_dev(dum), "binding gadget driver '%s'\n",
+			driver->driver.name);
+	if ((retval = driver->bind (&dum->gadget)) != 0) {
+		dum->driver = NULL;
+		dum->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	// FIXME: Check these calls for errors and re-order
+	driver->driver.bus = dum->gadget.dev.parent->bus;
+	driver_register (&driver->driver);
+
+	device_bind_driver (&dum->gadget.dev);
+
+	/* khubd will enumerate this in a while */
+	dum->port_status |= USB_PORT_STAT_CONNECTION
+		| (1 << USB_PORT_FEAT_C_CONNECTION);
+	return 0;
+}
+EXPORT_SYMBOL (usb_gadget_register_driver);
+
+/* caller must hold lock */
+static void
+stop_activity (struct dummy *dum, struct usb_gadget_driver *driver)
+{
+	struct dummy_ep	*ep;
+
+	/* prevent any more requests */
+	dum->address = 0;
+
+	/* The timer is left running so that outstanding URBs can fail */
+
+	/* nuke any pending requests first, so driver i/o is quiesced */
+	list_for_each_entry (ep, &dum->gadget.ep_list, ep.ep_list)
+		nuke (dum, ep);
+
+	/* driver now does any non-usb quiescing necessary */
+	if (driver) {
+		spin_unlock (&dum->lock);
+		driver->disconnect (&dum->gadget);
+		spin_lock (&dum->lock);
+	}
+}
+
+int
+usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+{
+	struct dummy	*dum = the_controller;
+	unsigned long	flags;
+
+	if (!dum)
+		return -ENODEV;
+	if (!driver || driver != dum->driver)
+		return -EINVAL;
+
+	dev_dbg (dummy_dev(dum), "unregister gadget driver '%s'\n",
+			driver->driver.name);
+
+	spin_lock_irqsave (&dum->lock, flags);
+	stop_activity (dum, driver);
+	dum->port_status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE |
+			USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
+	dum->port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+	spin_unlock_irqrestore (&dum->lock, flags);
+
+	driver->unbind (&dum->gadget);
+	dum->driver = NULL;
+
+	device_release_driver (&dum->gadget.dev);
+
+	driver_unregister (&driver->driver);
+
+	return 0;
+}
+EXPORT_SYMBOL (usb_gadget_unregister_driver);
+
+#undef is_enabled
+
+int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
+{
+	return -ENOSYS;
+}
+EXPORT_SYMBOL (net2280_set_fifo_mode);
+
+/*-------------------------------------------------------------------------*/
+
+/* MASTER/HOST SIDE DRIVER
+ *
+ * this uses the hcd framework to hook up to host side drivers.
+ * its root hub will only have one device, otherwise it acts like
+ * a normal host controller.
+ *
+ * when urbs are queued, they're just stuck on a list that we
+ * scan in a timer callback.  that callback connects writes from
+ * the host with reads from the device, and so on, based on the
+ * usb 2.0 rules.
+ */
+
+static int dummy_urb_enqueue (
+	struct usb_hcd			*hcd,
+	struct usb_host_endpoint	*ep,
+	struct urb			*urb,
+	int				mem_flags
+) {
+	struct dummy	*dum;
+	struct urbp	*urbp;
+	unsigned long	flags;
+
+	if (!urb->transfer_buffer && urb->transfer_buffer_length)
+		return -EINVAL;
+
+	urbp = kmalloc (sizeof *urbp, mem_flags);
+	if (!urbp)
+		return -ENOMEM;
+	urbp->urb = urb;
+
+	dum = hcd_to_dummy (hcd);
+	spin_lock_irqsave (&dum->lock, flags);
+
+	if (!dum->udev) {
+		dum->udev = urb->dev;
+		usb_get_dev (dum->udev);
+	} else if (unlikely (dum->udev != urb->dev))
+		dev_err (dummy_dev(dum), "usb_device address has changed!\n");
+
+	list_add_tail (&urbp->urbp_list, &dum->urbp_list);
+	urb->hcpriv = urbp;
+	if (usb_pipetype (urb->pipe) == PIPE_CONTROL)
+		urb->error_count = 1;		/* mark as a new urb */
+
+	/* kick the scheduler, it'll do the rest */
+	if (!timer_pending (&dum->timer))
+		mod_timer (&dum->timer, jiffies + 1);
+
+	spin_unlock_irqrestore (&dum->lock, flags);
+	return 0;
+}
+
+static int dummy_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
+{
+	/* giveback happens automatically in timer callback */
+	return 0;
+}
+
+static void maybe_set_status (struct urb *urb, int status)
+{
+	spin_lock (&urb->lock);
+	if (urb->status == -EINPROGRESS)
+		urb->status = status;
+	spin_unlock (&urb->lock);
+}
+
+/* transfer up to a frame's worth; caller must own lock */
+static int
+transfer (struct dummy *dum, struct urb *urb, struct dummy_ep *ep, int limit)
+{
+	struct dummy_request	*req;
+
+top:
+	/* if there's no request queued, the device is NAKing; return */
+	list_for_each_entry (req, &ep->queue, queue) {
+		unsigned	host_len, dev_len, len;
+		int		is_short, to_host;
+		int		rescan = 0;
+
+		/* 1..N packets of ep->ep.maxpacket each ... the last one
+		 * may be short (including zero length).
+		 *
+		 * writer can send a zlp explicitly (length 0) or implicitly
+		 * (length mod maxpacket zero, and 'zero' flag); they always
+		 * terminate reads.
+		 */
+		host_len = urb->transfer_buffer_length - urb->actual_length;
+		dev_len = req->req.length - req->req.actual;
+		len = min (host_len, dev_len);
+
+		/* FIXME update emulated data toggle too */
+
+		to_host = usb_pipein (urb->pipe);
+		if (unlikely (len == 0))
+			is_short = 1;
+		else {
+			char		*ubuf, *rbuf;
+
+			/* not enough bandwidth left? */
+			if (limit < ep->ep.maxpacket && limit < len)
+				break;
+			len = min (len, (unsigned) limit);
+			if (len == 0)
+				break;
+
+			/* use an extra pass for the final short packet */
+			if (len > ep->ep.maxpacket) {
+				rescan = 1;
+				len -= (len % ep->ep.maxpacket);
+			}
+			is_short = (len % ep->ep.maxpacket) != 0;
+
+			/* else transfer packet(s) */
+			ubuf = urb->transfer_buffer + urb->actual_length;
+			rbuf = req->req.buf + req->req.actual;
+			if (to_host)
+				memcpy (ubuf, rbuf, len);
+			else
+				memcpy (rbuf, ubuf, len);
+			ep->last_io = jiffies;
+
+			limit -= len;
+			urb->actual_length += len;
+			req->req.actual += len;
+		}
+
+		/* short packets terminate, maybe with overflow/underflow.
+		 * it's only really an error to write too much.
+		 *
+		 * partially filling a buffer optionally blocks queue advances
+		 * (so completion handlers can clean up the queue) but we don't
+		 * need to emulate such data-in-flight.  so we only show part
+		 * of the URB_SHORT_NOT_OK effect: completion status.
+		 */
+		if (is_short) {
+			if (host_len == dev_len) {
+				req->req.status = 0;
+				maybe_set_status (urb, 0);
+			} else if (to_host) {
+				req->req.status = 0;
+				if (dev_len > host_len)
+					maybe_set_status (urb, -EOVERFLOW);
+				else
+					maybe_set_status (urb,
+						(urb->transfer_flags
+							& URB_SHORT_NOT_OK)
+						? -EREMOTEIO : 0);
+			} else if (!to_host) {
+				maybe_set_status (urb, 0);
+				if (host_len > dev_len)
+					req->req.status = -EOVERFLOW;
+				else
+					req->req.status = 0;
+			}
+
+		/* many requests terminate without a short packet */
+		} else {
+			if (req->req.length == req->req.actual
+					&& !req->req.zero)
+				req->req.status = 0;
+			if (urb->transfer_buffer_length == urb->actual_length
+					&& !(urb->transfer_flags
+						& URB_ZERO_PACKET)) {
+				maybe_set_status (urb, 0);
+			}
+		}
+
+		/* device side completion --> continuable */
+		if (req->req.status != -EINPROGRESS) {
+			list_del_init (&req->queue);
+
+			spin_unlock (&dum->lock);
+			req->req.complete (&ep->ep, &req->req);
+			spin_lock (&dum->lock);
+
+			/* requests might have been unlinked... */
+			rescan = 1;
+		}
+
+		/* host side completion --> terminate */
+		if (urb->status != -EINPROGRESS)
+			break;
+
+		/* rescan to continue with any other queued i/o */
+		if (rescan)
+			goto top;
+	}
+	return limit;
+}
+
+static int periodic_bytes (struct dummy *dum, struct dummy_ep *ep)
+{
+	int	limit = ep->ep.maxpacket;
+
+	if (dum->gadget.speed == USB_SPEED_HIGH) {
+		int	tmp;
+
+		/* high bandwidth mode */
+		tmp = le16_to_cpu(ep->desc->wMaxPacketSize);
+		tmp = (tmp >> 11) & 0x03;
+		tmp *= 8 /* applies to entire frame */;
+		limit += limit * tmp;
+	}
+	return limit;
+}
+
+#define is_active(dum)	((dum->port_status & \
+		(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
+			USB_PORT_STAT_SUSPEND)) \
+		== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
+
+static struct dummy_ep *find_endpoint (struct dummy *dum, u8 address)
+{
+	int		i;
+
+	if (!is_active (dum))
+		return NULL;
+	if ((address & ~USB_DIR_IN) == 0)
+		return &dum->ep [0];
+	for (i = 1; i < DUMMY_ENDPOINTS; i++) {
+		struct dummy_ep	*ep = &dum->ep [i];
+
+		if (!ep->desc)
+			continue;
+		if (ep->desc->bEndpointAddress == address)
+			return ep;
+	}
+	return NULL;
+}
+
+#undef is_active
+
+#define Dev_Request	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+#define Dev_InRequest	(Dev_Request | USB_DIR_IN)
+#define Intf_Request	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
+#define Intf_InRequest	(Intf_Request | USB_DIR_IN)
+#define Ep_Request	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
+#define Ep_InRequest	(Ep_Request | USB_DIR_IN)
+
+/* drive both sides of the transfers; looks like irq handlers to
+ * both drivers except the callbacks aren't in_irq().
+ */
+static void dummy_timer (unsigned long _dum)
+{
+	struct dummy		*dum = (struct dummy *) _dum;
+	struct urbp		*urbp, *tmp;
+	unsigned long		flags;
+	int			limit, total;
+	int			i;
+
+	/* simplistic model for one frame's bandwidth */
+	switch (dum->gadget.speed) {
+	case USB_SPEED_LOW:
+		total = 8/*bytes*/ * 12/*packets*/;
+		break;
+	case USB_SPEED_FULL:
+		total = 64/*bytes*/ * 19/*packets*/;
+		break;
+	case USB_SPEED_HIGH:
+		total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
+		break;
+	default:
+		dev_err (dummy_dev(dum), "bogus device speed\n");
+		return;
+	}
+
+	/* FIXME if HZ != 1000 this will probably misbehave ... */
+
+	/* look at each urb queued by the host side driver */
+	spin_lock_irqsave (&dum->lock, flags);
+
+	if (!dum->udev) {
+		dev_err (dummy_dev(dum),
+				"timer fired with no URBs pending?\n");
+		spin_unlock_irqrestore (&dum->lock, flags);
+		return;
+	}
+
+	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+		if (!ep_name [i])
+			break;
+		dum->ep [i].already_seen = 0;
+	}
+
+restart:
+	list_for_each_entry_safe (urbp, tmp, &dum->urbp_list, urbp_list) {
+		struct urb		*urb;
+		struct dummy_request	*req;
+		u8			address;
+		struct dummy_ep		*ep = NULL;
+		int			type;
+
+		urb = urbp->urb;
+		if (urb->status != -EINPROGRESS) {
+			/* likely it was just unlinked */
+			goto return_urb;
+		}
+		type = usb_pipetype (urb->pipe);
+
+		/* used up this frame's non-periodic bandwidth?
+		 * FIXME there's infinite bandwidth for control and
+		 * periodic transfers ... unrealistic.
+		 */
+		if (total <= 0 && type == PIPE_BULK)
+			continue;
+
+		/* find the gadget's ep for this request (if configured) */
+		address = usb_pipeendpoint (urb->pipe);
+		if (usb_pipein (urb->pipe))
+			address |= USB_DIR_IN;
+		ep = find_endpoint(dum, address);
+		if (!ep) {
+			/* set_configuration() disagreement */
+			dev_dbg (dummy_dev(dum),
+				"no ep configured for urb %p\n",
+				urb);
+			maybe_set_status (urb, -EPROTO);
+			goto return_urb;
+		}
+
+		if (ep->already_seen)
+			continue;
+		ep->already_seen = 1;
+		if (ep == &dum->ep [0] && urb->error_count) {
+			ep->setup_stage = 1;	/* a new urb */
+			urb->error_count = 0;
+		}
+		if (ep->halted && !ep->setup_stage) {
+			/* NOTE: must not be iso! */
+			dev_dbg (dummy_dev(dum), "ep %s halted, urb %p\n",
+					ep->ep.name, urb);
+			maybe_set_status (urb, -EPIPE);
+			goto return_urb;
+		}
+		/* FIXME make sure both ends agree on maxpacket */
+
+		/* handle control requests */
+		if (ep == &dum->ep [0] && ep->setup_stage) {
+			struct usb_ctrlrequest		setup;
+			int				value = 1;
+			struct dummy_ep			*ep2;
+
+			setup = *(struct usb_ctrlrequest*) urb->setup_packet;
+			le16_to_cpus (&setup.wIndex);
+			le16_to_cpus (&setup.wValue);
+			le16_to_cpus (&setup.wLength);
+			if (setup.wLength != urb->transfer_buffer_length) {
+				maybe_set_status (urb, -EOVERFLOW);
+				goto return_urb;
+			}
+
+			/* paranoia, in case of stale queued data */
+			list_for_each_entry (req, &ep->queue, queue) {
+				list_del_init (&req->queue);
+				req->req.status = -EOVERFLOW;
+				dev_dbg (dummy_dev(dum), "stale req = %p\n",
+						req);
+
+				spin_unlock (&dum->lock);
+				req->req.complete (&ep->ep, &req->req);
+				spin_lock (&dum->lock);
+				ep->already_seen = 0;
+				goto restart;
+			}
+
+			/* gadget driver never sees set_address or operations
+			 * on standard feature flags.  some hardware doesn't
+			 * even expose them.
+			 */
+			ep->last_io = jiffies;
+			ep->setup_stage = 0;
+			ep->halted = 0;
+			switch (setup.bRequest) {
+			case USB_REQ_SET_ADDRESS:
+				if (setup.bRequestType != Dev_Request)
+					break;
+				dum->address = setup.wValue;
+				maybe_set_status (urb, 0);
+				dev_dbg (dummy_dev(dum), "set_address = %d\n",
+						setup.wValue);
+				value = 0;
+				break;
+			case USB_REQ_SET_FEATURE:
+				if (setup.bRequestType == Dev_Request) {
+					value = 0;
+					switch (setup.wValue) {
+					case USB_DEVICE_REMOTE_WAKEUP:
+						break;
+					default:
+						value = -EOPNOTSUPP;
+					}
+					if (value == 0) {
+						dum->devstatus |=
+							(1 << setup.wValue);
+						maybe_set_status (urb, 0);
+					}
+
+				} else if (setup.bRequestType == Ep_Request) {
+					// endpoint halt
+					ep2 = find_endpoint (dum,
+							setup.wIndex);
+					if (!ep2) {
+						value = -EOPNOTSUPP;
+						break;
+					}
+					ep2->halted = 1;
+					value = 0;
+					maybe_set_status (urb, 0);
+				}
+				break;
+			case USB_REQ_CLEAR_FEATURE:
+				if (setup.bRequestType == Dev_Request) {
+					switch (setup.wValue) {
+					case USB_DEVICE_REMOTE_WAKEUP:
+						dum->devstatus &= ~(1 <<
+							USB_DEVICE_REMOTE_WAKEUP);
+						value = 0;
+						maybe_set_status (urb, 0);
+						break;
+					default:
+						value = -EOPNOTSUPP;
+						break;
+					}
+				} else if (setup.bRequestType == Ep_Request) {
+					// endpoint halt
+					ep2 = find_endpoint (dum,
+							setup.wIndex);
+					if (!ep2) {
+						value = -EOPNOTSUPP;
+						break;
+					}
+					ep2->halted = 0;
+					value = 0;
+					maybe_set_status (urb, 0);
+				}
+				break;
+			case USB_REQ_GET_STATUS:
+				if (setup.bRequestType == Dev_InRequest
+						|| setup.bRequestType
+							== Intf_InRequest
+						|| setup.bRequestType
+							== Ep_InRequest
+						) {
+					char *buf;
+
+					// device: remote wakeup, selfpowered
+					// interface: nothing
+					// endpoint: halt
+					buf = (char *)urb->transfer_buffer;
+					if (urb->transfer_buffer_length > 0) {
+						if (setup.bRequestType ==
+								Ep_InRequest) {
+	ep2 = find_endpoint (dum, setup.wIndex);
+	if (!ep2) {
+		value = -EOPNOTSUPP;
+		break;
+	}
+	buf [0] = ep2->halted;
+						} else if (setup.bRequestType ==
+								Dev_InRequest) {
+							buf [0] = (u8)
+								dum->devstatus;
+						} else
+							buf [0] = 0;
+					}
+					if (urb->transfer_buffer_length > 1)
+						buf [1] = 0;
+					urb->actual_length = min (2,
+						urb->transfer_buffer_length);
+					value = 0;
+					maybe_set_status (urb, 0);
+				}
+				break;
+			}
+
+			/* gadget driver handles all other requests.  block
+			 * until setup() returns; no reentrancy issues etc.
+			 */
+			if (value > 0) {
+				spin_unlock (&dum->lock);
+				value = dum->driver->setup (&dum->gadget,
+						&setup);
+				spin_lock (&dum->lock);
+
+				if (value >= 0) {
+					/* no delays (max 64KB data stage) */
+					limit = 64*1024;
+					goto treat_control_like_bulk;
+				}
+				/* error, see below */
+			}
+
+			if (value < 0) {
+				if (value != -EOPNOTSUPP)
+					dev_dbg (dummy_dev(dum),
+						"setup --> %d\n",
+						value);
+				maybe_set_status (urb, -EPIPE);
+				urb->actual_length = 0;
+			}
+
+			goto return_urb;
+		}
+
+		/* non-control requests */
+		limit = total;
+		switch (usb_pipetype (urb->pipe)) {
+		case PIPE_ISOCHRONOUS:
+			/* FIXME is it urb->interval since the last xfer?
+			 * use urb->iso_frame_desc[i].
+			 * complete whether or not ep has requests queued.
+			 * report random errors, to debug drivers.
+			 */
+			limit = max (limit, periodic_bytes (dum, ep));
+			maybe_set_status (urb, -ENOSYS);
+			break;
+
+		case PIPE_INTERRUPT:
+			/* FIXME is it urb->interval since the last xfer?
+			 * this almost certainly polls too fast.
+			 */
+			limit = max (limit, periodic_bytes (dum, ep));
+			/* FALLTHROUGH */
+
+		// case PIPE_BULK:  case PIPE_CONTROL:
+		default:
+		treat_control_like_bulk:
+			ep->last_io = jiffies;
+			total = transfer (dum, urb, ep, limit);
+			break;
+		}
+
+		/* incomplete transfer? */
+		if (urb->status == -EINPROGRESS)
+			continue;
+
+return_urb:
+		urb->hcpriv = NULL;
+		list_del (&urbp->urbp_list);
+		kfree (urbp);
+		if (ep)
+			ep->already_seen = ep->setup_stage = 0;
+
+		spin_unlock (&dum->lock);
+		usb_hcd_giveback_urb (dummy_to_hcd(dum), urb, NULL);
+		spin_lock (&dum->lock);
+
+		goto restart;
+	}
+
+	/* want a 1 msec delay here */
+	if (!list_empty (&dum->urbp_list))
+		mod_timer (&dum->timer, jiffies + msecs_to_jiffies(1));
+	else {
+		usb_put_dev (dum->udev);
+		dum->udev = NULL;
+	}
+
+	spin_unlock_irqrestore (&dum->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define PORT_C_MASK \
+	 ((1 << USB_PORT_FEAT_C_CONNECTION) \
+	| (1 << USB_PORT_FEAT_C_ENABLE) \
+	| (1 << USB_PORT_FEAT_C_SUSPEND) \
+	| (1 << USB_PORT_FEAT_C_OVER_CURRENT) \
+	| (1 << USB_PORT_FEAT_C_RESET))
+
+static int dummy_hub_status (struct usb_hcd *hcd, char *buf)
+{
+	struct dummy		*dum;
+	unsigned long		flags;
+	int			retval;
+
+	dum = hcd_to_dummy (hcd);
+
+	spin_lock_irqsave (&dum->lock, flags);
+	if (!(dum->port_status & PORT_C_MASK))
+		retval = 0;
+	else {
+		*buf = (1 << 1);
+		dev_dbg (dummy_dev(dum), "port status 0x%08x has changes\n",
+			dum->port_status);
+		retval = 1;
+	}
+	spin_unlock_irqrestore (&dum->lock, flags);
+	return retval;
+}
+
+static inline void
+hub_descriptor (struct usb_hub_descriptor *desc)
+{
+	memset (desc, 0, sizeof *desc);
+	desc->bDescriptorType = 0x29;
+	desc->bDescLength = 9;
+	desc->wHubCharacteristics = __constant_cpu_to_le16 (0x0001);
+	desc->bNbrPorts = 1;
+	desc->bitmap [0] = 0xff;
+	desc->bitmap [1] = 0xff;
+}
+
+static int dummy_hub_control (
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+) {
+	struct dummy	*dum;
+	int		retval = 0;
+	unsigned long	flags;
+
+	dum = hcd_to_dummy (hcd);
+	spin_lock_irqsave (&dum->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		break;
+	case ClearPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if (dum->port_status & (1 << USB_PORT_FEAT_SUSPEND)) {
+				/* 20msec resume signaling */
+				dum->resuming = 1;
+				dum->re_timeout = jiffies +
+							msecs_to_jiffies(20);
+			}
+			break;
+		case USB_PORT_FEAT_POWER:
+			dum->port_status = 0;
+			dum->resuming = 0;
+			stop_activity(dum, dum->driver);
+			break;
+		default:
+			dum->port_status &= ~(1 << wValue);
+		}
+		break;
+	case GetHubDescriptor:
+		hub_descriptor ((struct usb_hub_descriptor *) buf);
+		break;
+	case GetHubStatus:
+		*(u32 *) buf = __constant_cpu_to_le32 (0);
+		break;
+	case GetPortStatus:
+		if (wIndex != 1)
+			retval = -EPIPE;
+
+		/* whoever resets or resumes must GetPortStatus to
+		 * complete it!!
+		 */
+		if (dum->resuming && time_after (jiffies, dum->re_timeout)) {
+			dum->port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+			dum->port_status &= ~(1 << USB_PORT_FEAT_SUSPEND);
+			dum->resuming = 0;
+			dum->re_timeout = 0;
+			if (dum->driver && dum->driver->resume) {
+				spin_unlock (&dum->lock);
+				dum->driver->resume (&dum->gadget);
+				spin_lock (&dum->lock);
+			}
+		}
+		if ((dum->port_status & (1 << USB_PORT_FEAT_RESET)) != 0
+				&& time_after (jiffies, dum->re_timeout)) {
+			dum->port_status |= (1 << USB_PORT_FEAT_C_RESET);
+			dum->port_status &= ~(1 << USB_PORT_FEAT_RESET);
+			dum->re_timeout = 0;
+			if (dum->driver) {
+				dum->port_status |= USB_PORT_STAT_ENABLE;
+				/* give it the best speed we agree on */
+				dum->gadget.speed = dum->driver->speed;
+				dum->gadget.ep0->maxpacket = 64;
+				switch (dum->gadget.speed) {
+				case USB_SPEED_HIGH:
+					dum->port_status |=
+						USB_PORT_STAT_HIGH_SPEED;
+					break;
+				case USB_SPEED_LOW:
+					dum->gadget.ep0->maxpacket = 8;
+					dum->port_status |=
+						USB_PORT_STAT_LOW_SPEED;
+					break;
+				default:
+					dum->gadget.speed = USB_SPEED_FULL;
+					break;
+				}
+			}
+		}
+		((u16 *) buf)[0] = cpu_to_le16 (dum->port_status);
+		((u16 *) buf)[1] = cpu_to_le16 (dum->port_status >> 16);
+		break;
+	case SetHubFeature:
+		retval = -EPIPE;
+		break;
+	case SetPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if ((dum->port_status & (1 << USB_PORT_FEAT_SUSPEND))
+					== 0) {
+				dum->port_status |=
+						(1 << USB_PORT_FEAT_SUSPEND);
+				if (dum->driver && dum->driver->suspend) {
+					spin_unlock (&dum->lock);
+					dum->driver->suspend (&dum->gadget);
+					spin_lock (&dum->lock);
+				}
+			}
+			break;
+		case USB_PORT_FEAT_RESET:
+			/* if it's already running, disconnect first */
+			if (dum->port_status & USB_PORT_STAT_ENABLE) {
+				dum->port_status &= ~(USB_PORT_STAT_ENABLE
+						| USB_PORT_STAT_LOW_SPEED
+						| USB_PORT_STAT_HIGH_SPEED);
+				if (dum->driver) {
+					dev_dbg (dummy_dev(dum),
+							"disconnect\n");
+					stop_activity (dum, dum->driver);
+				}
+
+				/* FIXME test that code path! */
+			}
+			/* 50msec reset signaling */
+			dum->re_timeout = jiffies + msecs_to_jiffies(50);
+			/* FALLTHROUGH */
+		default:
+			dum->port_status |= (1 << wValue);
+		}
+		break;
+
+	default:
+		dev_dbg (dummy_dev(dum),
+			"hub control req%04x v%04x i%04x l%d\n",
+			typeReq, wValue, wIndex, wLength);
+
+		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore (&dum->lock, flags);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline ssize_t
+show_urb (char *buf, size_t size, struct urb *urb)
+{
+	int ep = usb_pipeendpoint (urb->pipe);
+
+	return snprintf (buf, size,
+		"urb/%p %s ep%d%s%s len %d/%d\n",
+		urb,
+		({ char *s;
+		 switch (urb->dev->speed) {
+		 case USB_SPEED_LOW:	s = "ls"; break;
+		 case USB_SPEED_FULL:	s = "fs"; break;
+		 case USB_SPEED_HIGH:	s = "hs"; break;
+		 default:		s = "?"; break;
+		 }; s; }),
+		ep, ep ? (usb_pipein (urb->pipe) ? "in" : "out") : "",
+		({ char *s;
+		 switch (usb_pipetype (urb->pipe)) {
+		 case PIPE_CONTROL:	s = ""; break;
+		 case PIPE_BULK:	s = "-bulk"; break;
+		 case PIPE_INTERRUPT:	s = "-int"; break;
+		 default:		s = "-iso"; break;
+		 }; s; }),
+		urb->actual_length, urb->transfer_buffer_length);
+}
+
+static ssize_t
+show_urbs (struct device *dev, char *buf)
+{
+	struct usb_hcd		*hcd = dev_get_drvdata (dev);
+	struct dummy		*dum = hcd_to_dummy (hcd);
+	struct urbp		*urbp;
+	size_t			size = 0;
+	unsigned long		flags;
+
+	spin_lock_irqsave (&dum->lock, flags);
+	list_for_each_entry (urbp, &dum->urbp_list, urbp_list) {
+		size_t		temp;
+
+		temp = show_urb (buf, PAGE_SIZE - size, urbp->urb);
+		buf += temp;
+		size += temp;
+	}
+	spin_unlock_irqrestore (&dum->lock, flags);
+
+	return size;
+}
+static DEVICE_ATTR (urbs, S_IRUGO, show_urbs, NULL);
+
+static int dummy_start (struct usb_hcd *hcd)
+{
+	struct dummy		*dum;
+	struct usb_device	*root;
+	int			retval;
+
+	dum = hcd_to_dummy (hcd);
+
+	/*
+	 * MASTER side init ... we emulate a root hub that'll only ever
+	 * talk to one device (the slave side).  Also appears in sysfs,
+	 * just like more familiar pci-based HCDs.
+	 */
+	spin_lock_init (&dum->lock);
+	init_timer (&dum->timer);
+	dum->timer.function = dummy_timer;
+	dum->timer.data = (unsigned long) dum;
+
+	INIT_LIST_HEAD (&dum->urbp_list);
+
+	root = usb_alloc_dev (NULL, &hcd->self, 0);
+	if (!root)
+		return -ENOMEM;
+
+	/* root hub enters addressed state... */
+	hcd->state = HC_STATE_RUNNING;
+	root->speed = USB_SPEED_HIGH;
+
+	/* ...then configured, so khubd sees us. */
+	if ((retval = usb_hcd_register_root_hub (root, hcd)) != 0) {
+		goto err1;
+	}
+
+	/* only show a low-power port: just 8mA */
+	hub_set_power_budget (root, 8);
+
+	if ((retval = dummy_register_udc (dum)) != 0)
+		goto err2;
+
+	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
+	device_create_file (dummy_dev(dum), &dev_attr_urbs);
+	return 0;
+
+ err2:
+	usb_disconnect (&hcd->self.root_hub);
+ err1:
+	usb_put_dev (root);
+	hcd->state = HC_STATE_QUIESCING;
+	return retval;
+}
+
+static void dummy_stop (struct usb_hcd *hcd)
+{
+	struct dummy		*dum;
+
+	dum = hcd_to_dummy (hcd);
+
+	device_remove_file (dummy_dev(dum), &dev_attr_urbs);
+
+	usb_gadget_unregister_driver (dum->driver);
+	dummy_unregister_udc (dum);
+
+	dev_info (dummy_dev(dum), "stopped\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int dummy_h_get_frame (struct usb_hcd *hcd)
+{
+	return dummy_g_get_frame (NULL);
+}
+
+static const struct hc_driver dummy_hcd = {
+	.description =		(char *) driver_name,
+	.product_desc =		"Dummy host controller",
+	.hcd_priv_size =	sizeof(struct dummy),
+
+	.flags =		HCD_USB2,
+
+	.start =		dummy_start,
+	.stop =			dummy_stop,
+
+	.urb_enqueue = 		dummy_urb_enqueue,
+	.urb_dequeue = 		dummy_urb_dequeue,
+
+	.get_frame_number = 	dummy_h_get_frame,
+
+	.hub_status_data = 	dummy_hub_status,
+	.hub_control = 		dummy_hub_control,
+};
+
+static int dummy_probe (struct device *dev)
+{
+	struct usb_hcd		*hcd;
+	int			retval;
+
+	dev_info (dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
+
+	hcd = usb_create_hcd (&dummy_hcd, dev, dev->bus_id);
+	if (!hcd)
+		return -ENOMEM;
+	the_controller = hcd_to_dummy (hcd);
+
+	retval = usb_add_hcd(hcd, 0, 0);
+	if (retval != 0) {
+		usb_put_hcd (hcd);
+		the_controller = NULL;
+	}
+	return retval;
+}
+
+static void dummy_remove (struct device *dev)
+{
+	struct usb_hcd		*hcd;
+
+	hcd = dev_get_drvdata (dev);
+	usb_remove_hcd (hcd);
+	usb_put_hcd (hcd);
+	the_controller = NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int dummy_pdev_detect (void)
+{
+	int			retval;
+
+	retval = driver_register (&dummy_driver);
+	if (retval < 0)
+		return retval;
+
+	the_pdev.name = "hc";
+	the_pdev.dev.driver = &dummy_driver;
+	the_pdev.dev.release = dummy_pdev_release;
+
+	retval = platform_device_register (&the_pdev);
+	if (retval < 0)
+		driver_unregister (&dummy_driver);
+	return retval;
+}
+
+static void dummy_pdev_remove (void)
+{
+	platform_device_unregister (&the_pdev);
+	driver_unregister (&dummy_driver);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init init (void)
+{
+	int	retval;
+
+	if (usb_disabled ())
+		return -ENODEV;
+	if ((retval = dummy_pdev_detect ()) != 0)
+		return retval;
+	if ((retval = dummy_probe (&the_pdev.dev)) != 0)
+		dummy_pdev_remove ();
+	return retval;
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	dummy_remove (&the_pdev.dev);
+	dummy_pdev_remove ();
+}
+module_exit (cleanup);
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
new file mode 100644
index 0000000..f7c6d75
--- /dev/null
+++ b/drivers/usb/gadget/epautoconf.c
@@ -0,0 +1,310 @@
+/*
+ * epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
+ *
+ * Copyright (C) 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include "gadget_chips.h"
+
+
+/* we must assign addresses for configurable endpoints (like net2280) */
+static __initdata unsigned epnum;
+
+// #define MANY_ENDPOINTS
+#ifdef MANY_ENDPOINTS
+/* more than 15 configurable endpoints */
+static __initdata unsigned in_epnum;
+#endif
+
+
+/*
+ * This should work with endpoints from controller drivers sharing the
+ * same endpoint naming convention.  For example:
+ *
+ *	- ep1, ep2, ... address is fixed, not direction or type
+ *	- ep1in, ep2out, ... address and direction are fixed, not type
+ *	- ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
+ *	- ep1in-bulk, ep2out-iso, ... all three are fixed
+ *	- ep-* ... no functionality restrictions
+ *
+ * Type suffixes are "-bulk", "-iso", or "-int".  Numbers are decimal.
+ * Less common restrictions are implied by gadget_is_*().
+ *
+ * NOTE:  each endpoint is unidirectional, as specified by its USB
+ * descriptor; and isn't specific to a configuration or altsetting.
+ */
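+/*
+ * To illustrate the rules above with hypothetical names (not taken from
+ * any particular controller driver):  a descriptor asking for a bulk-IN
+ * endpoint could be satisfied by "ep1", "ep2in", "ep3-bulk", or
+ * "ep4in-bulk", but not by "ep2out" (direction fixed as OUT) or by
+ * "ep3-iso" (type fixed as isochronous).  Names like "ep-a" carry no
+ * restrictions at all.
+ */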
+static int __init
+ep_matches (
+	struct usb_gadget		*gadget,
+	struct usb_ep			*ep,
+	struct usb_endpoint_descriptor	*desc
+)
+{
+	u8		type;
+	const char	*tmp;
+	u16		max;
+
+	/* endpoint already claimed? */
+	if (0 != ep->driver_data)
+		return 0;
+		
+	/* only support ep0 for portable CONTROL traffic */
+	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	if (USB_ENDPOINT_XFER_CONTROL == type)
+		return 0;
+
+	/* some other naming convention */
+	if ('e' != ep->name[0])
+		return 0;
+
+	/* type-restriction:  "-iso", "-bulk", or "-int".
+	 * direction-restriction:  "in", "out".
+	 */
+	if ('-' != ep->name[2]) {
+		tmp = strrchr (ep->name, '-');
+		if (tmp) {
+			switch (type) {
+			case USB_ENDPOINT_XFER_INT:
+				/* bulk endpoints handle interrupt transfers,
+				 * except the toggle-quirky iso-synch kind
+				 */
+				if ('s' == tmp[2])	// == "-iso"
+					return 0;
+				/* for now, avoid PXA "interrupt-in";
+				 * it's documented as never using DATA1.
+				 */
+				if (gadget_is_pxa (gadget)
+						&& 'i' == tmp [1])
+					return 0;
+				break;
+			case USB_ENDPOINT_XFER_BULK:
+				if ('b' != tmp[1])	// != "-bulk"
+					return 0;
+				break;
+			case USB_ENDPOINT_XFER_ISOC:
+				if ('s' != tmp[2])	// != "-iso"
+					return 0;
+			}
+		} else {
+			tmp = ep->name + strlen (ep->name);
+		}
+
+		/* direction-restriction:  "..in-..", "out-.." */
+		tmp--;
+		if (!isdigit (*tmp)) {
+			if (desc->bEndpointAddress & USB_DIR_IN) {
+				if ('n' != *tmp)
+					return 0;
+			} else {
+				if ('t' != *tmp)
+					return 0;
+			}
+		}
+	}
+
+	/* endpoint maxpacket size is an input parameter, except for bulk
+	 * where it's an output parameter representing the full speed limit.
+	 * the usb spec fixes high speed bulk maxpacket at 512 bytes.
+	 */
+	max = 0x7ff & le16_to_cpup (&desc->wMaxPacketSize);
+	switch (type) {
+	case USB_ENDPOINT_XFER_INT:
+		/* INT:  limit 64 bytes full speed, 1024 high speed */
+		if (!gadget->is_dualspeed && max > 64)
+			return 0;
+		/* FALLTHROUGH */
+
+	case USB_ENDPOINT_XFER_ISOC:
+		/* ISO:  limit 1023 bytes full speed, 1024 high speed */
+		if (ep->maxpacket < max)
+			return 0;
+		if (!gadget->is_dualspeed && max > 1023)
+			return 0;
+
+		/* BOTH:  "high bandwidth" works only at high speed */
+		if ((desc->wMaxPacketSize & __constant_cpu_to_le16(3<<11))) {
+			if (!gadget->is_dualspeed)
+				return 0;
+			/* configure your hardware with enough buffering!! */
+		}
+		break;
+	}
+
+	/* MATCH!! */
+
+	/* report address */
+	if (isdigit (ep->name [2])) {
+		u8	num = simple_strtol (&ep->name [2], NULL, 10);
+		desc->bEndpointAddress |= num;
+#ifdef	MANY_ENDPOINTS
+	} else if (desc->bEndpointAddress & USB_DIR_IN) {
+		if (++in_epnum > 15)
+			return 0;
+		desc->bEndpointAddress = USB_DIR_IN | in_epnum;
+#endif
+	} else {
+		if (++epnum > 15)
+			return 0;
+		desc->bEndpointAddress |= epnum;
+	}
+
+	/* report (variable) full speed bulk maxpacket */
+	if (USB_ENDPOINT_XFER_BULK == type) {
+		int size = ep->maxpacket;
+
+		/* min() doesn't work on bitfields with gcc-3.5 */
+		if (size > 64)
+			size = 64;
+		desc->wMaxPacketSize = cpu_to_le16(size);
+	}
+	return 1;
+}
+
+static struct usb_ep * __init
+find_ep (struct usb_gadget *gadget, const char *name)
+{
+	struct usb_ep	*ep;
+
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		if (0 == strcmp (ep->name, name))
+			return ep;
+	}
+	return NULL;
+}
+
+/**
+ * usb_ep_autoconfig - choose an endpoint matching the descriptor
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ *	initialized.  For periodic transfers, the maximum packet
+ *	size must also be initialized.  This is modified on success.
+ *
+ * By choosing an endpoint to use with the specified descriptor, this
+ * routine simplifies writing gadget drivers that work with multiple
+ * USB device controllers.  The endpoint would be passed later to
+ * usb_ep_enable(), along with some descriptor.
+ *
+ * That second descriptor won't always be the same as the first one.
+ * For example, isochronous endpoints can be autoconfigured for high
+ * bandwidth, and then used in several lower bandwidth altsettings.
+ * Also, high and full speed descriptors will be different.
+ *
+ * Be sure to examine and test the results of autoconfiguration on your
+ * hardware.  This code may not make the best choices about how to use the
+ * USB controller, and it can't know all the restrictions that may apply.
+ * Some combinations of driver and hardware won't be able to autoconfigure.
+ *
+ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
+ * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
+ * is initialized as if the endpoint were used at full speed.  To prevent
+ * the endpoint from being returned by a later autoconfig call, claim it
+ * by assigning ep->driver_data to some non-null value.
+ *
+ * On failure, this returns a null endpoint descriptor.
+ */
+struct usb_ep * __init usb_ep_autoconfig (
+	struct usb_gadget		*gadget,
+	struct usb_endpoint_descriptor	*desc
+)
+{
+	struct usb_ep	*ep;
+	u8		type;
+
+	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* First, apply chip-specific "best usage" knowledge.
+	 * This might make a good usb_gadget_ops hook ...
+	 */
+	if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
+		/* ep-e, ep-f are PIO with only 64 byte fifos */
+		ep = find_ep (gadget, "ep-e");
+		if (ep && ep_matches (gadget, ep, desc))
+			return ep;
+		ep = find_ep (gadget, "ep-f");
+		if (ep && ep_matches (gadget, ep, desc))
+			return ep;
+
+	} else if (gadget_is_goku (gadget)) {
+		if (USB_ENDPOINT_XFER_INT == type) {
+			/* single buffering is enough */
+			ep = find_ep (gadget, "ep3-bulk");
+			if (ep && ep_matches (gadget, ep, desc))
+				return ep;
+		} else if (USB_ENDPOINT_XFER_BULK == type
+				&& (USB_DIR_IN & desc->bEndpointAddress)) {
+			/* DMA may be available */
+			ep = find_ep (gadget, "ep2-bulk");
+			if (ep && ep_matches (gadget, ep, desc))
+				return ep;
+		}
+
+	} else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
+		/* single buffering is enough; maybe 8 byte fifo is too */
+		ep = find_ep (gadget, "ep3in-bulk");
+		if (ep && ep_matches (gadget, ep, desc))
+			return ep;
+
+	} else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
+		ep = find_ep (gadget, "ep1-bulk");
+		if (ep && ep_matches (gadget, ep, desc))
+			return ep;
+	}
+
+	/* Second, look at endpoints until an unclaimed one looks usable */ 
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		if (ep_matches (gadget, ep, desc))
+			return ep;
+	}
+
+	/* Fail */
+	return NULL;
+}
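+
+/* A minimal usage sketch (hypothetical gadget driver code, reusing the
+ * descriptor names from ether.c as an example):  bind() asks for its
+ * endpoints, records the results, and claims each one via driver_data
+ * so a later autoconfig call won't hand it out again.
+ *
+ *	struct usb_ep *in_ep, *out_ep;
+ *
+ *	usb_ep_autoconfig_reset (gadget);
+ *	in_ep = usb_ep_autoconfig (gadget, &fs_source_desc);
+ *	if (!in_ep)
+ *		return -ENODEV;
+ *	in_ep->driver_data = in_ep;	// claim the endpoint
+ *	out_ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
+ *	if (!out_ep)
+ *		return -ENODEV;
+ *	out_ep->driver_data = out_ep;	// claim the endpoint
+ */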
+
+/**
+ * usb_ep_autoconfig_reset - reset endpoint autoconfig state
+ * @gadget: device for which autoconfig state will be reset
+ *
+ * Use this for devices where one configuration may need to assign
+ * endpoint resources very differently from the next one.  It clears
+ * state such as ep->driver_data and the record of assigned endpoints
+ * used by usb_ep_autoconfig().
+ */
+void __init usb_ep_autoconfig_reset (struct usb_gadget *gadget)
+{
+	struct usb_ep	*ep;
+
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		ep->driver_data = NULL;
+	}
+#ifdef	MANY_ENDPOINTS
+	in_epnum = 0;
+#endif
+	epnum = 0;
+}
+
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
new file mode 100644
index 0000000..cff9fb0
--- /dev/null
+++ b/drivers/usb/gadget/ether.c
@@ -0,0 +1,2660 @@
+/*
+ * ether.c -- Ethernet gadget driver, with CDC and non-CDC options
+ *
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+// #define DEBUG 1
+// #define VERBOSE
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_cdc.h>
+#include <linux/usb_gadget.h>
+
+#include <linux/random.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "gadget_chips.h"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Ethernet gadget driver -- with CDC and non-CDC options
+ * Builds on hardware support for a full duplex link.
+ *
+ * CDC Ethernet is the standard USB solution for sending Ethernet frames
+ * using USB.  Real hardware tends to use the same framing protocol but looks
+ * different for control features.  This driver strongly prefers to use
+ * this USB-IF standard as its open-systems interoperability solution;
+ * most host side USB stacks (except from Microsoft) support it.
+ *
+ * There's some hardware that can't talk CDC.  We make that hardware
+ * implement a "minimalist" vendor-agnostic CDC core:  same framing, but
+ * link-level setup only requires activating the configuration.
+ * Linux supports it, but other host operating systems may not.
+ * (This is a subset of CDC Ethernet.)
+ *
+ * A third option is also in use.  Rather than CDC Ethernet, or something
+ * simpler, Microsoft pushes their own approach: RNDIS.  The published
+ * RNDIS specs are ambiguous, apparently incomplete, and needlessly
+ * complex.
+ */
+
+#define DRIVER_DESC		"Ethernet Gadget"
+#define DRIVER_VERSION		"Equinox 2004"
+
+static const char shortname [] = "ether";
+static const char driver_desc [] = DRIVER_DESC;
+
+#define RX_EXTRA	20		/* guard against rx overflows */
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+#include "rndis.h"
+#else
+#define rndis_init() 0
+#define rndis_exit() do{}while(0)
+#endif
+
+/* CDC and RNDIS support the same host-chosen outgoing packet filters. */
+#define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
+ 			|USB_CDC_PACKET_TYPE_DIRECTED)
+
+
+/*-------------------------------------------------------------------------*/
+
+struct eth_dev {
+	spinlock_t		lock;
+	struct usb_gadget	*gadget;
+	struct usb_request	*req;		/* for control responses */
+	struct usb_request	*stat_req;	/* for cdc & rndis status */
+
+	u8			config;
+	struct usb_ep		*in_ep, *out_ep, *status_ep;
+	const struct usb_endpoint_descriptor
+				*in, *out, *status;
+	struct list_head	tx_reqs, rx_reqs;
+
+	struct net_device	*net;
+	struct net_device_stats	stats;
+	atomic_t		tx_qlen;
+
+	struct work_struct	work;
+	unsigned		zlp:1;
+	unsigned		cdc:1;
+	unsigned		rndis:1;
+	unsigned		suspended:1;
+	u16			cdc_filter;
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+	int			rndis_config;
+	u8			host_mac [ETH_ALEN];
+};
+
+/* This version autoconfigures as much as possible at run-time.
+ *
+ * It also ASSUMES a self-powered device, without remote wakeup,
+ * although remote wakeup support would make sense.
+ */
+static const char *EP_IN_NAME;
+static const char *EP_OUT_NAME;
+static const char *EP_STATUS_NAME;
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ * It's for devices with only CDC Ethernet configurations.
+ */
+#define CDC_VENDOR_NUM	0x0525		/* NetChip */
+#define CDC_PRODUCT_NUM	0xa4a1		/* Linux-USB Ethernet Gadget */
+
+/* For hardware that can't talk CDC, we use the same vendor ID that
+ * ARM Linux has used for ethernet-over-usb, both with sa1100 and
+ * with pxa250.  We're protocol-compatible, if the host-side drivers
+ * use the endpoint descriptors.  bcdDevice (version) is nonzero, so
+ * drivers that need to hard-wire endpoint numbers have a hook.
+ *
+ * The protocol is a minimal subset of CDC Ether, which works on any bulk
+ * hardware that's not deeply broken ... even on hardware that can't talk
+ * RNDIS (like SA-1100, with no interrupt endpoint, or anything that
+ * doesn't handle control-OUT).
+ */
+#define	SIMPLE_VENDOR_NUM	0x049f
+#define	SIMPLE_PRODUCT_NUM	0x505a
+
+/* For hardware that can talk RNDIS and either of the above protocols,
+ * use this ID ... the windows INF files will know it.  Unless it's
+ * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose
+ * the non-RNDIS configuration.
+ */
+#define RNDIS_VENDOR_NUM	0x0525	/* NetChip */
+#define RNDIS_PRODUCT_NUM	0xa4a2	/* Ethernet/RNDIS Gadget */
+
+
+/* Some systems will want different product identifiers published in the
+ * device descriptor, either numbers or strings or both.  These string
+ * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+static ushort __initdata idVendor;
+module_param(idVendor, ushort, S_IRUGO);
+MODULE_PARM_DESC(idVendor, "USB Vendor ID");
+
+static ushort __initdata idProduct;
+module_param(idProduct, ushort, S_IRUGO);
+MODULE_PARM_DESC(idProduct, "USB Product ID");
+
+static ushort __initdata bcdDevice;
+module_param(bcdDevice, ushort, S_IRUGO);
+MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
+
+static char *__initdata iManufacturer;
+module_param(iManufacturer, charp, S_IRUGO);
+MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
+
+static char *__initdata iProduct;
+module_param(iProduct, charp, S_IRUGO);
+MODULE_PARM_DESC(iProduct, "USB Product string");
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *__initdata dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *__initdata host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Include CDC support if we could run on CDC-capable hardware. */
+
+#ifdef CONFIG_USB_GADGET_NET2280
+#define	DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_DUMMY_HCD
+#define	DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_GOKU
+#define	DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_LH7A40X
+#define DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_MQ11XX
+#define	DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_OMAP
+#define	DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_N9604
+#define	DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_PXA27X
+#define DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_AT91
+#define DEV_CONFIG_CDC
+#endif
+
+
+/* For CDC-incapable hardware, choose the simple cdc subset.
+ * Anything that talks bulk (without notable bugs) can do this.
+ */
+#ifdef CONFIG_USB_GADGET_PXA2XX
+#define	DEV_CONFIG_SUBSET
+#endif
+
+#ifdef CONFIG_USB_GADGET_SH
+#define	DEV_CONFIG_SUBSET
+#endif
+
+#ifdef CONFIG_USB_GADGET_SA1100
+/* use non-CDC for backwards compatibility */
+#define	DEV_CONFIG_SUBSET
+#endif
+
+#ifdef CONFIG_USB_GADGET_S3C2410
+#define DEV_CONFIG_CDC
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* "main" config is either CDC, or its simple subset */
+static inline int is_cdc(struct eth_dev *dev)
+{
+#if	!defined(DEV_CONFIG_SUBSET)
+	return 1;		/* only cdc possible */
+#elif	!defined (DEV_CONFIG_CDC)
+	return 0;		/* only subset possible */
+#else
+	return dev->cdc;	/* depends on what hardware we found */
+#endif
+}
+
+/* "secondary" RNDIS config may sometimes be activated */
+static inline int rndis_active(struct eth_dev *dev)
+{
+#ifdef	CONFIG_USB_ETH_RNDIS
+	return dev->rndis;
+#else
+	return 0;
+#endif
+}
+
+#define	subset_active(dev)	(!is_cdc(dev) && !rndis_active(dev))
+#define	cdc_active(dev)		( is_cdc(dev) && !rndis_active(dev))
+
+
+
+#define DEFAULT_QLEN	2	/* double buffering by default */
+
+/* peak bulk transfer bits-per-second */
+#define	HS_BPS 		(13 * 512 * 8 * 1000 * 8)
+#define	FS_BPS		(19 *  64 * 1 * 1000 * 8)
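+
+/* A sketch of where those peak numbers come from:  high speed bulk can
+ * move up to 13 packets of 512 bytes per 125 usec microframe (8 such
+ * microframes per msec), while full speed bulk moves at most 19 packets
+ * of 64 bytes per 1 msec frame; multiply by 1000 frames per second and
+ * 8 bits per byte to get the bits-per-second figures above.
+ */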
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+
+static unsigned qmult = 5;
+module_param (qmult, uint, S_IRUGO|S_IWUSR);
+
+
+/* for dual-speed hardware, use deeper queues at highspeed */
+#define qlen(gadget) \
+	(DEFAULT_QLEN*((gadget->speed == USB_SPEED_HIGH) ? qmult : 1))
+
+/* also defer IRQs on highspeed TX */
+#define TX_DELAY	qmult
+
+#define	BITRATE(g)	(((g)->speed == USB_SPEED_HIGH) ? HS_BPS : FS_BPS)
+
+#else	/* full speed (low speed doesn't do bulk) */
+#define qlen(gadget) DEFAULT_QLEN
+
+#define	BITRATE(g)	FS_BPS
+#endif
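+
+/* With the defaults above, for instance, a dual-speed device queues
+ * DEFAULT_QLEN * qmult == 2 * 5 == 10 requests per direction while
+ * running at high speed, and just DEFAULT_QLEN == 2 at full speed.
+ */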
+
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(d,level,fmt,args...) \
+	printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DEBUG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDEBUG	DEBUG
+#else
+#define VDEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARN(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* USB DRIVER HOOKUP (to the hardware driver, below us), mostly
+ * ep0 implementation:  descriptors, config management, setup().
+ * also optional class-specific notification interrupt transfer.
+ */
+
+/*
+ * DESCRIPTORS ... most are static, but strings and (full) configuration
+ * descriptors are built on demand.  For now we do either full CDC, or
+ * our simple subset, with RNDIS as an optional second configuration.
+ *
+ * RNDIS includes some CDC ACM descriptors ... like CDC Ethernet.  But
+ * its class descriptors match a modem's (they're ignored; it's really just
+ * Ethernet functionality), it doesn't need the NOP altsetting, and its
+ * status transfer endpoint isn't optional.
+ */
+
+#define STRING_MANUFACTURER		1
+#define STRING_PRODUCT			2
+#define STRING_ETHADDR			3
+#define STRING_DATA			4
+#define STRING_CONTROL			5
+#define STRING_RNDIS_CONTROL		6
+#define STRING_CDC			7
+#define STRING_SUBSET			8
+#define STRING_RNDIS			9
+
+#define USB_BUFSIZ	256		/* holds our biggest descriptor */
+
+/*
+ * This device advertises one configuration, eth_config, unless RNDIS
+ * is enabled (rndis_config) on hardware supporting at least two configs.
+ *
+ * NOTE:  Controllers like superh_udc should probably be able to use
+ * an RNDIS-only configuration.
+ *
+ * FIXME define some higher-powered configurations to make it easier
+ * to recharge batteries ...
+ */
+
+#define DEV_CONFIG_VALUE	1	/* cdc or subset */
+#define DEV_RNDIS_CONFIG_VALUE	2	/* rndis; optional */
+
+static struct usb_device_descriptor
+device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		__constant_cpu_to_le16 (0x0200),
+
+	.bDeviceClass =		USB_CLASS_COMM,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+
+	.idVendor =		__constant_cpu_to_le16 (CDC_VENDOR_NUM),
+	.idProduct =		__constant_cpu_to_le16 (CDC_PRODUCT_NUM),
+	.iManufacturer =	STRING_MANUFACTURER,
+	.iProduct =		STRING_PRODUCT,
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor
+otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	.bmAttributes =		USB_OTG_SRP,
+};
+
+static struct usb_config_descriptor
+eth_config = {
+	.bLength =		sizeof eth_config,
+	.bDescriptorType =	USB_DT_CONFIG,
+
+	/* compute wTotalLength on the fly */
+	.bNumInterfaces =	2,
+	.bConfigurationValue =	DEV_CONFIG_VALUE,
+	.iConfiguration =	STRING_CDC,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		50,
+};
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+static struct usb_config_descriptor 
+rndis_config = {
+	.bLength =              sizeof rndis_config,
+	.bDescriptorType =      USB_DT_CONFIG,
+
+	/* compute wTotalLength on the fly */
+	.bNumInterfaces =       2,
+	.bConfigurationValue =  DEV_RNDIS_CONFIG_VALUE,
+	.iConfiguration =       STRING_RNDIS,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =            50,
+};
+#endif
+
+/*
+ * Compared to the simple CDC subset, the full CDC Ethernet model adds
+ * three class descriptors, two interface descriptors, and an optional
+ * status endpoint.  Both have a "data" interface and two bulk endpoints.
+ * There are also differences in how control requests are handled.
+ *
+ * RNDIS shares a lot with CDC-Ethernet, since it's a variant of
+ * the CDC-ACM (modem) spec.
+ */
+
+#ifdef	DEV_CONFIG_CDC
+static struct usb_interface_descriptor
+control_intf = {
+	.bLength =		sizeof control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	0,
+	/* status endpoint is optional; this may be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	.iInterface =		STRING_CONTROL,
+};
+#endif
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+static const struct usb_interface_descriptor
+rndis_control_intf = {
+	.bLength =              sizeof rndis_control_intf,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	  
+	.bInterfaceNumber =     0,
+	.bNumEndpoints =        1,
+	.bInterfaceClass =      USB_CLASS_COMM,
+	.bInterfaceSubClass =   USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =   USB_CDC_ACM_PROTO_VENDOR,
+	.iInterface =           STRING_RNDIS_CONTROL,
+};
+#endif
+
+#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
+
+static const struct usb_cdc_header_desc header_desc = {
+	.bLength =		sizeof header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		__constant_cpu_to_le16 (0x0110),
+};
+
+static const struct usb_cdc_union_desc union_desc = {
+	.bLength =		sizeof union_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+
+	.bMasterInterface0 =	0,	/* index of control interface */
+	.bSlaveInterface0 =	1,	/* index of DATA interface */
+};
+
+#endif	/* CDC || RNDIS */
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+
+static const struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
+	.bLength =  		sizeof call_mgmt_descriptor,
+	.bDescriptorType = 	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = 	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities = 	0x00,
+	.bDataInterface = 	0x01,
+};
+
+static struct usb_cdc_acm_descriptor acm_descriptor = {
+	.bLength =  		sizeof acm_descriptor,
+	.bDescriptorType = 	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = 	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities = 	0x00,
+};
+
+#endif
+
+#ifdef	DEV_CONFIG_CDC
+
+static const struct usb_cdc_ether_desc ether_desc = {
+	.bLength =		sizeof ether_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	.iMACAddress =		STRING_ETHADDR,
+	.bmEthernetStatistics =	__constant_cpu_to_le32 (0), /* no statistics */
+	.wMaxSegmentSize =	__constant_cpu_to_le16 (ETH_FRAME_LEN),
+	.wNumberMCFilters =	__constant_cpu_to_le16 (0),
+	.bNumberPowerFilters =	0,
+};
+
+#endif
+
+#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
+
+/* include the status endpoint if we can, even where it's optional.
+ * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancelation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real hardware
+ * can provide.  more advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ *
+ * RNDIS requires the status endpoint, since it uses that encapsulation
+ * mechanism for its funky RPC scheme.
+ */
+ 
+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define STATUS_BYTECOUNT		16	/* 8 byte header + data */
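+
+/* Note how that 32 msec period is encoded below:  the full speed status
+ * endpoint uses bInterval = 1 << 5 == 32, counted in 1 msec frames, while
+ * the high speed one uses bInterval = 5 + 4 == 9, an exponent counted in
+ * 125 usec microframes (2^(9-1) microframes == 32 msec).
+ */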
+
+static struct usb_endpoint_descriptor
+fs_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16 (STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+#endif
+
+#ifdef	DEV_CONFIG_CDC
+
+/* the default data interface has no endpoints ... */
+
+static const struct usb_interface_descriptor
+data_nop_intf = {
+	.bLength =		sizeof data_nop_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static const struct usb_interface_descriptor
+data_intf = {
+	.bLength =		sizeof data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	.iInterface =		STRING_DATA,
+};
+
+#endif
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+
+/* RNDIS doesn't activate by changing to the "real" altsetting */
+
+static const struct usb_interface_descriptor
+rndis_data_intf = {
+	.bLength =		sizeof rndis_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	.iInterface =		STRING_DATA,
+};
+
+#endif
+
+#ifdef DEV_CONFIG_SUBSET
+
+/*
+ * The "Simple" CDC-subset option is a vendor-neutral model that most
+ * full speed controllers can handle:  one interface, two bulk endpoints.
+ */
+
+static const struct usb_interface_descriptor
+subset_data_intf = {
+	.bLength =		sizeof subset_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	0,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	.iInterface =		STRING_DATA,
+};
+
+#endif	/* SUBSET */
+
+
+static struct usb_endpoint_descriptor
+fs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor
+fs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static const struct usb_descriptor_header *fs_eth_function [11] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+#ifdef DEV_CONFIG_CDC
+	/* "cdc" mode descriptors */
+	(struct usb_descriptor_header *) &control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &union_desc,
+	(struct usb_descriptor_header *) &ether_desc,
+	/* NOTE: status endpoint may need to be removed */
+	(struct usb_descriptor_header *) &fs_status_desc,
+	/* data interface, with altsetting */
+	(struct usb_descriptor_header *) &data_nop_intf,
+	(struct usb_descriptor_header *) &data_intf,
+	(struct usb_descriptor_header *) &fs_source_desc,
+	(struct usb_descriptor_header *) &fs_sink_desc,
+	NULL,
+#endif /* DEV_CONFIG_CDC */
+};
+
+static inline void __init fs_subset_descriptors(void)
+{
+#ifdef DEV_CONFIG_SUBSET
+	fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
+	fs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc;
+	fs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc;
+	fs_eth_function[4] = NULL;
+#else
+	fs_eth_function[1] = NULL;
+#endif
+}
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+static const struct usb_descriptor_header *fs_rndis_function [] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &union_desc,
+	(struct usb_descriptor_header *) &fs_status_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &fs_source_desc,
+	(struct usb_descriptor_header *) &fs_sink_desc,
+	NULL,
+};
+#endif
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+
+/*
+ * usb 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ */
+
+#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
+static struct usb_endpoint_descriptor
+hs_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16 (STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+#endif /* DEV_CONFIG_CDC */
+
+static struct usb_endpoint_descriptor
+hs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16 (512),
+};
+
+static struct usb_endpoint_descriptor
+hs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16 (512),
+};
+
+static struct usb_qualifier_descriptor
+dev_qualifier = {
+	.bLength =		sizeof dev_qualifier,
+	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
+
+	.bcdUSB =		__constant_cpu_to_le16 (0x0200),
+	.bDeviceClass =		USB_CLASS_COMM,
+
+	.bNumConfigurations =	1,
+};
+
+static const struct usb_descriptor_header *hs_eth_function [11] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+#ifdef DEV_CONFIG_CDC
+	/* "cdc" mode descriptors */
+	(struct usb_descriptor_header *) &control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &union_desc,
+	(struct usb_descriptor_header *) &ether_desc,
+	/* NOTE: status endpoint may need to be removed */
+	(struct usb_descriptor_header *) &hs_status_desc,
+	/* data interface, with altsetting */
+	(struct usb_descriptor_header *) &data_nop_intf,
+	(struct usb_descriptor_header *) &data_intf,
+	(struct usb_descriptor_header *) &hs_source_desc,
+	(struct usb_descriptor_header *) &hs_sink_desc,
+	NULL,
+#endif /* DEV_CONFIG_CDC */
+};
+
+static inline void __init hs_subset_descriptors(void)
+{
+#ifdef DEV_CONFIG_SUBSET
+	hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
+	hs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc;
+	hs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc;
+	hs_eth_function[4] = NULL;
+#else
+	hs_eth_function[1] = NULL;
+#endif
+}
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+static const struct usb_descriptor_header *hs_rndis_function [] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &union_desc,
+	(struct usb_descriptor_header *) &hs_status_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &hs_source_desc,
+	(struct usb_descriptor_header *) &hs_sink_desc,
+	NULL,
+};
+#endif
+
+
+/* maxpacket and other transfer characteristics vary by speed. */
+#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
+
+#else
+
+/* if there's no high speed support, maxpacket doesn't change. */
+#define ep_desc(g,hs,fs) fs
+
+static inline void __init hs_subset_descriptors(void)
+{
+}
+
+#endif	/* !CONFIG_USB_GADGET_DUALSPEED */
+
+/*-------------------------------------------------------------------------*/
+
+/* descriptors that are built on-demand */
+
+static char				manufacturer [50];
+static char				product_desc [40] = DRIVER_DESC;
+
+#ifdef	DEV_CONFIG_CDC
+/* address that the host will use ... usually assigned at random */
+static char				ethaddr [2 * ETH_ALEN + 1];
+#endif
+
+/* static strings, in UTF-8 */
+static struct usb_string		strings [] = {
+	{ STRING_MANUFACTURER,	manufacturer, },
+	{ STRING_PRODUCT,	product_desc, },
+	{ STRING_DATA,		"Ethernet Data", },
+#ifdef	DEV_CONFIG_CDC
+	{ STRING_CDC,		"CDC Ethernet", },
+	{ STRING_ETHADDR,	ethaddr, },
+	{ STRING_CONTROL,	"CDC Communications Control", },
+#endif
+#ifdef	DEV_CONFIG_SUBSET
+	{ STRING_SUBSET,	"CDC Ethernet Subset", },
+#endif
+#ifdef	CONFIG_USB_ETH_RNDIS
+	{ STRING_RNDIS,		"RNDIS", },
+	{ STRING_RNDIS_CONTROL,	"RNDIS Communications Control", },
+#endif
+	{  }		/* end of list */
+};
+
+static struct usb_gadget_strings	stringtab = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings,
+};
+
+/*
+ * one config, two interfaces:  control, data.
+ * complications: class descriptors, and an altsetting.
+ */
+static int
+config_buf (enum usb_device_speed speed,
+	u8 *buf, u8 type,
+	unsigned index, int is_otg)
+{
+	int					len;
+	const struct usb_config_descriptor	*config;
+	const struct usb_descriptor_header	**function;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	int				hs = (speed == USB_SPEED_HIGH);
+
+	if (type == USB_DT_OTHER_SPEED_CONFIG)
+		hs = !hs;
+#define which_fn(t)	(hs ? hs_ ## t ## _function : fs_ ## t ## _function)
+#else
+#define	which_fn(t)	(fs_ ## t ## _function)
+#endif
+
+	if (index >= device_desc.bNumConfigurations)
+		return -EINVAL;
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+	/* list the RNDIS config first, to make Microsoft's drivers
+	 * happy. DOCSIS 1.0 needs this too.
+	 */
+	if (device_desc.bNumConfigurations == 2 && index == 0) {
+		config = &rndis_config;
+		function = which_fn (rndis);
+	} else
+#endif
+	{
+		config = &eth_config;
+		function = which_fn (eth);
+	}
+
+	/* for now, don't advertise srp-only devices */
+	if (!is_otg)
+		function++;
+
+	len = usb_gadget_config_buf (config, buf, USB_BUFSIZ, function);
+	if (len < 0)
+		return len;
+	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
+	return len;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void eth_start (struct eth_dev *dev, int gfp_flags);
+static int alloc_requests (struct eth_dev *dev, unsigned n, int gfp_flags);
+
+#ifdef	DEV_CONFIG_CDC
+static inline int ether_alt_ep_setup (struct eth_dev *dev, struct usb_ep *ep)
+{
+	const struct usb_endpoint_descriptor	*d;
+
+	/* With CDC, the host isn't allowed to use these two data
+	 * endpoints in the default altsetting for the interface,
+	 * so we don't activate them yet.  Reset from SET_INTERFACE.
+	 *
+	 * Strictly speaking RNDIS should work the same: activation is
+	 * a side effect of setting a packet filter.  Deactivation is
+	 * from REMOTE_NDIS_HALT_MSG, reset from REMOTE_NDIS_RESET_MSG.
+	 */
+
+	/* one endpoint writes data back IN to the host */
+	if (strcmp (ep->name, EP_IN_NAME) == 0) {
+		d = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc);
+		ep->driver_data = dev;
+		dev->in = d;
+
+	/* one endpoint just reads OUT packets */
+	} else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
+		d = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc);
+		ep->driver_data = dev;
+		dev->out = d;
+
+	/* optional status/notification endpoint */
+	} else if (EP_STATUS_NAME &&
+			strcmp (ep->name, EP_STATUS_NAME) == 0) {
+		int			result;
+
+		d = ep_desc (dev->gadget, &hs_status_desc, &fs_status_desc);
+		result = usb_ep_enable (ep, d);
+		if (result < 0)
+			return result;
+
+		ep->driver_data = dev;
+		dev->status = d;
+	}
+	return 0;
+}
+#endif
+
+#if	defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
+static inline int ether_ep_setup (struct eth_dev *dev, struct usb_ep *ep)
+{
+	int					result;
+	const struct usb_endpoint_descriptor	*d;
+
+	/* CDC subset is simpler:  if the device is there,
+	 * it's live with rx and tx endpoints.
+	 *
+	 * Do this as a shortcut for RNDIS too.
+	 */
+
+	/* one endpoint writes data back IN to the host */
+	if (strcmp (ep->name, EP_IN_NAME) == 0) {
+		d = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc);
+		result = usb_ep_enable (ep, d);
+		if (result < 0)
+			return result;
+
+		ep->driver_data = dev;
+		dev->in = d;
+
+	/* one endpoint just reads OUT packets */
+	} else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
+		d = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc);
+		result = usb_ep_enable (ep, d);
+		if (result < 0)
+			return result;
+
+		ep->driver_data = dev;
+		dev->out = d;
+	}
+
+	return 0;
+}
+#endif
+
+static int
+set_ether_config (struct eth_dev *dev, int gfp_flags)
+{
+	int			result = 0;
+	struct usb_ep		*ep;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	gadget_for_each_ep (ep, gadget) {
+#ifdef	DEV_CONFIG_CDC
+		if (!dev->rndis && dev->cdc) {
+			result = ether_alt_ep_setup (dev, ep);
+			if (result == 0)
+				continue;
+		}
+#endif
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+		if (dev->rndis && strcmp (ep->name, EP_STATUS_NAME) == 0) {
+			const struct usb_endpoint_descriptor	*d;
+			d = ep_desc (gadget, &hs_status_desc, &fs_status_desc);
+			result = usb_ep_enable (ep, d);
+			if (result == 0) {
+				ep->driver_data = dev;
+				dev->status = d;
+				continue;
+			}
+		} else
+#endif
+
+		{
+#if	defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
+			result = ether_ep_setup (dev, ep);
+			if (result == 0)
+				continue;
+#endif
+		}
+
+		/* stop on error */
+		ERROR (dev, "can't enable %s, result %d\n", ep->name, result);
+		break;
+	}
+	if (!result && (!dev->in_ep || !dev->out_ep))
+		result = -ENODEV;
+
+	if (result == 0)
+		result = alloc_requests (dev, qlen (gadget), gfp_flags);
+
+	/* on error, disable any endpoints  */
+	if (result < 0) {
+#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
+		if (dev->status)
+			(void) usb_ep_disable (dev->status_ep);
+#endif
+		dev->status = NULL;
+#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
+		if (dev->rndis || !dev->cdc) {
+			if (dev->in)
+				(void) usb_ep_disable (dev->in_ep);
+			if (dev->out)
+				(void) usb_ep_disable (dev->out_ep);
+		}
+#endif
+		dev->in = NULL;
+		dev->out = NULL;
+	} else
+
+	/* activate non-CDC configs right away
+	 * this isn't strictly according to the RNDIS spec
+	 */
+#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
+	if (dev->rndis || !dev->cdc) {
+		netif_carrier_on (dev->net);
+		if (netif_running (dev->net)) {
+			spin_unlock (&dev->lock);
+			eth_start (dev, GFP_ATOMIC);
+			spin_lock (&dev->lock);
+		}
+	}
+#endif
+
+	if (result == 0)
+		DEBUG (dev, "qlen %d\n", qlen (gadget));
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+static void eth_reset_config (struct eth_dev *dev)
+{
+	struct usb_request	*req;
+
+	if (dev->config == 0)
+		return;
+
+	DEBUG (dev, "%s\n", __FUNCTION__);
+
+	netif_stop_queue (dev->net);
+	netif_carrier_off (dev->net);
+
+	/* disable endpoints, forcing (synchronous) completion of
+	 * pending i/o.  then free the requests.
+	 */
+	if (dev->in) {
+		usb_ep_disable (dev->in_ep);
+		while (likely (!list_empty (&dev->tx_reqs))) {
+			req = container_of (dev->tx_reqs.next,
+						struct usb_request, list);
+			list_del (&req->list);
+			usb_ep_free_request (dev->in_ep, req);
+		}
+	}
+	if (dev->out) {
+		usb_ep_disable (dev->out_ep);
+		while (likely (!list_empty (&dev->rx_reqs))) {
+			req = container_of (dev->rx_reqs.next,
+						struct usb_request, list);
+			list_del (&req->list);
+			usb_ep_free_request (dev->out_ep, req);
+		}
+	}
+
+	if (dev->status) {
+		usb_ep_disable (dev->status_ep);
+	}
+	dev->config = 0;
+}
+
+/* change our operational config.  must agree with the code
+ * that returns config descriptors, and with the altsetting code.
+ */
+static int
+eth_set_config (struct eth_dev *dev, unsigned number, int gfp_flags)
+{
+	int			result = 0;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	if (number == dev->config)
+		return 0;
+
+	if (gadget_is_sa1100 (gadget)
+			&& dev->config
+			&& atomic_read (&dev->tx_qlen) != 0) {
+		/* tx fifo is full, but we can't clear it ... */
+		INFO (dev, "can't change configurations\n");
+		return -ESPIPE;
+	}
+	eth_reset_config (dev);
+
+	/* default:  pass all packets, no multicast filtering */
+	dev->cdc_filter = 0x000f;
+
+	switch (number) {
+	case DEV_CONFIG_VALUE:
+		dev->rndis = 0;
+		result = set_ether_config (dev, gfp_flags);
+		break;
+#ifdef	CONFIG_USB_ETH_RNDIS
+	case DEV_RNDIS_CONFIG_VALUE:
+		dev->rndis = 1;
+		result = set_ether_config (dev, gfp_flags);
+		break;
+#endif
+	default:
+		result = -EINVAL;
+		/* FALL THROUGH */
+	case 0:
+		break;
+	}
+
+	if (result) {
+		if (number)
+			eth_reset_config (dev);
+		usb_gadget_vbus_draw(dev->gadget,
+				dev->gadget->is_otg ? 8 : 100);
+	} else {
+		char *speed;
+		unsigned power;
+
+		power = 2 * eth_config.bMaxPower;
+		usb_gadget_vbus_draw(dev->gadget, power);
+
+		switch (gadget->speed) {
+		case USB_SPEED_FULL:	speed = "full"; break;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+		case USB_SPEED_HIGH:	speed = "high"; break;
+#endif
+		default: 		speed = "?"; break;
+		}
+
+		dev->config = number;
+		INFO (dev, "%s speed config #%d: %d mA, %s, using %s\n",
+				speed, number, power, driver_desc,
+				dev->rndis
+					? "RNDIS"
+					: (dev->cdc
+						? "CDC Ethernet"
+						: "CDC Ethernet Subset"));
+	}
+	return result;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	DEV_CONFIG_CDC
+
+static void eth_status_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct usb_cdc_notification	*event = req->buf;
+	int				value = req->status;
+	struct eth_dev			*dev = ep->driver_data;
+
+	/* issue the second notification if host reads the first */
+	if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION
+			&& value == 0) {
+		__le32	*data = req->buf + sizeof *event;
+
+		event->bmRequestType = 0xA1;
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = __constant_cpu_to_le16 (0);
+		event->wIndex = __constant_cpu_to_le16 (1);
+		event->wLength = __constant_cpu_to_le16 (8);
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data [0] = data [1] = cpu_to_le32 (BITRATE (dev->gadget));
+
+		req->length = STATUS_BYTECOUNT;
+		value = usb_ep_queue (ep, req, GFP_ATOMIC);
+		DEBUG (dev, "send SPEED_CHANGE --> %d\n", value);
+		if (value == 0)
+			return;
+	} else if (value != -ECONNRESET)
+		DEBUG (dev, "event %02x --> %d\n",
+			event->bNotificationType, value);
+	event->bmRequestType = 0xff;
+}
+
+static void issue_start_status (struct eth_dev *dev)
+{
+	struct usb_request		*req = dev->stat_req;
+	struct usb_cdc_notification	*event;
+	int				value;
+ 
+	DEBUG (dev, "%s, flush old status first\n", __FUNCTION__);
+
+	/* flush old status
+	 *
+	 * FIXME ugly idiom; maybe we'd be better off with just
+	 * a "cancel the whole queue" primitive since any
+	 * unlink-one primitive has way too many error modes.
+	 * here, we "know" toggle is already clear...
+	 */
+	usb_ep_disable (dev->status_ep);
+	usb_ep_enable (dev->status_ep, dev->status);
+
+	/* 3.8.1 says to issue NETWORK_CONNECTION first, then
+	 * a SPEED_CHANGE.  could be useful in some configs.
+	 */
+	event = req->buf;
+	event->bmRequestType = 0xA1;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = __constant_cpu_to_le16 (1);	/* connected */
+	event->wIndex = __constant_cpu_to_le16 (1);
+	event->wLength = 0;
+
+	req->length = sizeof *event;
+	req->complete = eth_status_complete;
+	value = usb_ep_queue (dev->status_ep, req, GFP_ATOMIC);
+	if (value < 0)
+		DEBUG (dev, "status buf queue --> %d\n", value);
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static void eth_setup_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length)
+		DEBUG ((struct eth_dev *) ep->driver_data,
+				"setup complete --> %d, %d/%d\n",
+				req->status, req->actual, req->length);
+}
+
+#ifdef CONFIG_USB_ETH_RNDIS
+
+static void rndis_response_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length)
+		DEBUG ((struct eth_dev *) ep->driver_data,
+			"rndis response complete --> %d, %d/%d\n",
+			req->status, req->actual, req->length);
+
+	/* done sending after USB_CDC_GET_ENCAPSULATED_RESPONSE */
+}
+
+static void rndis_command_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct eth_dev          *dev = ep->driver_data;
+	int			status;
+	
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	spin_lock(&dev->lock);
+	status = rndis_msg_parser (dev->rndis_config, (u8 *) req->buf);
+	if (status < 0)
+		ERROR(dev, "%s: rndis parse error %d\n", __FUNCTION__, status);
+	spin_unlock(&dev->lock);
+}
+
+#endif	/* RNDIS */
+
+/*
+ * The setup() callback implements all the ep0 functionality that's not
+ * handled lower down.  CDC has a number of less-common features:
+ *
+ *  - two interfaces:  control, and ethernet data
+ *  - Ethernet data interface has two altsettings:  default, and active
+ *  - class-specific descriptors for the control interface
+ *  - class-specific control requests
+ */
+static int
+eth_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct eth_dev		*dev = get_gadget_data (gadget);
+	struct usb_request	*req = dev->req;
+	int			value = -EOPNOTSUPP;
+	u16			wIndex = ctrl->wIndex;
+	u16			wValue = ctrl->wValue;
+	u16			wLength = ctrl->wLength;
+
+	/* descriptors just go into the pre-allocated ep0 buffer,
+	 * while config change events may enable network traffic.
+	 */
+	req->complete = eth_setup_complete;
+	switch (ctrl->bRequest) {
+
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			break;
+		switch (wValue >> 8) {
+
+		case USB_DT_DEVICE:
+			value = min (wLength, (u16) sizeof device_desc);
+			memcpy (req->buf, &device_desc, value);
+			break;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!gadget->is_dualspeed)
+				break;
+			value = min (wLength, (u16) sizeof dev_qualifier);
+			memcpy (req->buf, &dev_qualifier, value);
+			break;
+
+		case USB_DT_OTHER_SPEED_CONFIG:
+			if (!gadget->is_dualspeed)
+				break;
+			// FALLTHROUGH
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+		case USB_DT_CONFIG:
+			value = config_buf (gadget->speed, req->buf,
+					wValue >> 8,
+					wValue & 0xff,
+					gadget->is_otg);
+			if (value >= 0)
+				value = min (wLength, (u16) value);
+			break;
+
+		case USB_DT_STRING:
+			value = usb_gadget_get_string (&stringtab,
+					wValue & 0xff, req->buf);
+			if (value >= 0)
+				value = min (wLength, (u16) value);
+			break;
+		}
+		break;
+
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != 0)
+			break;
+		if (gadget->a_hnp_support)
+			DEBUG (dev, "HNP available\n");
+		else if (gadget->a_alt_hnp_support)
+			DEBUG (dev, "HNP needs a different root port\n");
+		spin_lock (&dev->lock);
+		value = eth_set_config (dev, wValue, GFP_ATOMIC);
+		spin_unlock (&dev->lock);
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			break;
+		*(u8 *)req->buf = dev->config;
+		value = min (wLength, (u16) 1);
+		break;
+
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != USB_RECIP_INTERFACE
+				|| !dev->config
+				|| wIndex > 1)
+			break;
+		if (!dev->cdc && wIndex != 0)
+			break;
+		spin_lock (&dev->lock);
+
+		/* PXA hardware partially handles SET_INTERFACE;
+		 * we need to kluge around that interference.
+		 */
+		if (gadget_is_pxa (gadget)) {
+			value = eth_set_config (dev, DEV_CONFIG_VALUE,
+						GFP_ATOMIC);
+			goto done_set_intf;
+		}
+
+#ifdef DEV_CONFIG_CDC
+		switch (wIndex) {
+		case 0:		/* control/master intf */
+			if (wValue != 0)
+				break;
+			if (dev->status) {
+				usb_ep_disable (dev->status_ep);
+				usb_ep_enable (dev->status_ep, dev->status);
+			}
+			value = 0;
+			break;
+		case 1:		/* data intf */
+			if (wValue > 1)
+				break;
+			usb_ep_disable (dev->in_ep);
+			usb_ep_disable (dev->out_ep);
+
+			/* CDC requires the data transfers not be done from
+			 * the default interface setting ... also, setting
+			 * the non-default interface clears filters etc.
+			 */
+			if (wValue == 1) {
+				usb_ep_enable (dev->in_ep, dev->in);
+				usb_ep_enable (dev->out_ep, dev->out);
+				dev->cdc_filter = DEFAULT_FILTER;
+				netif_carrier_on (dev->net);
+				if (dev->status)
+					issue_start_status (dev);
+				if (netif_running (dev->net)) {
+					spin_unlock (&dev->lock);
+					eth_start (dev, GFP_ATOMIC);
+					spin_lock (&dev->lock);
+				}
+			} else {
+				netif_stop_queue (dev->net);
+				netif_carrier_off (dev->net);
+			}
+			value = 0;
+			break;
+		}
+#else
+		/* FIXME this is wrong, as is the assumption that
+		 * all non-PXA hardware talks real CDC ...
+		 */
+		dev_warn (&gadget->dev, "set_interface ignored!\n");
+#endif /* DEV_CONFIG_CDC */
+
+done_set_intf:
+		spin_unlock (&dev->lock);
+		break;
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)
+				|| !dev->config
+				|| wIndex > 1)
+			break;
+		if (!(dev->cdc || dev->rndis) && wIndex != 0)
+			break;
+
+		/* for CDC, iff carrier is on, data interface is active. */
+		if (dev->rndis || wIndex != 1)
+			*(u8 *)req->buf = 0;
+		else
+			*(u8 *)req->buf = netif_carrier_ok (dev->net) ? 1 : 0;
+		value = min (wLength, (u16) 1);
+		break;
+
+#ifdef DEV_CONFIG_CDC
+	case USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
+				|| !dev->cdc
+				|| dev->rndis
+				|| wLength != 0
+				|| wIndex > 1)
+			break;
+		DEBUG (dev, "packet filter %02x\n", wValue);
+		dev->cdc_filter = wValue;
+		value = 0;
+		break;
+
+	/* and potentially:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+#endif /* DEV_CONFIG_CDC */
+
+#ifdef CONFIG_USB_ETH_RNDIS		
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
+				|| !dev->rndis
+				|| wLength > USB_BUFSIZ
+				|| wValue
+				|| rndis_control_intf.bInterfaceNumber
+					!= wIndex)
+			break;
+		/* read the request, then process it */
+		value = wLength;
+		req->complete = rndis_command_complete;
+		/* later, rndis_control_ack () sends a notification */
+		break;
+		
+	case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if ((USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE)
+					== ctrl->bRequestType
+				&& dev->rndis
+				// && wLength >= 0x0400
+				&& !wValue
+				&& rndis_control_intf.bInterfaceNumber
+					== wIndex) {
+			u8 *buf;
+
+			/* return the result */
+			buf = rndis_get_next_response (dev->rndis_config,
+						       &value);
+			if (buf) {
+				memcpy (req->buf, buf, value);
+				req->complete = rndis_response_complete;
+				rndis_free_response(dev->rndis_config, buf);
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+#endif	/* RNDIS */
+
+	default:
+		VDEBUG (dev,
+			"unknown control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+	}
+
+	/* respond with data transfer before status phase? */
+	if (value >= 0) {
+		req->length = value;
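+		/* a short reply that exactly fills its last packet needs a
+		 * trailing zlp so the host knows the transfer is complete
+		 */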
+		req->zero = value < wLength
+				&& (value % gadget->ep0->maxpacket) == 0;
+		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DEBUG (dev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			eth_setup_complete (gadget->ep0, req);
+		}
+	}
+
+	/* host either stalls (value < 0) or reports success */
+	return value;
+}
+
+static void
+eth_disconnect (struct usb_gadget *gadget)
+{
+	struct eth_dev		*dev = get_gadget_data (gadget);
+	unsigned long		flags;
+
+	spin_lock_irqsave (&dev->lock, flags);
+	netif_stop_queue (dev->net);
+	netif_carrier_off (dev->net);
+	eth_reset_config (dev);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* FIXME RNDIS should enter RNDIS_UNINITIALIZED */
+
+	/* next we may get setup() calls to enumerate new connections;
+	 * or an unbind() during shutdown (including removing module).
+	 */
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+static int eth_change_mtu (struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+
+	// FIXME if rndis, don't change while link's live
+
+	if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		return -ERANGE;
+	/* no zero-length packet read wanted after mtu-sized packets */
+	if (((new_mtu + sizeof (struct ethhdr)) % dev->in_ep->maxpacket) == 0)
+		return -EDOM;
+	net->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *eth_get_stats (struct net_device *net)
+{
+	return &((struct eth_dev *)netdev_priv(net))->stats;
+}
+
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	strlcpy(p->driver, shortname, sizeof p->driver);
+	strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
+	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+	strlcpy (p->bus_info, dev->gadget->dev.bus_id, sizeof p->bus_info);
+}
+
+static u32 eth_get_link(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	return dev->gadget->speed != USB_SPEED_UNKNOWN;
+}
+
+static struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = eth_get_link
+};
+
+static void defer_kevent (struct eth_dev *dev, int flag)
+{
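+	/* if this flag was already set, the work is already pending */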
+	if (test_and_set_bit (flag, &dev->todo))
+		return;
+	if (!schedule_work (&dev->work))
+		ERROR (dev, "kevent %d may have been dropped\n", flag);
+	else
+		DEBUG (dev, "kevent %d scheduled\n", flag);
+}
+
+static void rx_complete (struct usb_ep *ep, struct usb_request *req);
+
+static int
+rx_submit (struct eth_dev *dev, struct usb_request *req, int gfp_flags)
+{
+	struct sk_buff		*skb;
+	int			retval = -ENOMEM;
+	size_t			size;
+
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*maxpacket), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 *
+	 * RNDIS uses internal framing, and explicitly allows senders to
+	 * pad to end-of-packet.  That's potentially nice for speed,
+	 * but means receivers can't recover synch on their own.
+	 */
+	size = (sizeof (struct ethhdr) + dev->net->mtu + RX_EXTRA);
+	size += dev->out_ep->maxpacket - 1;
+#ifdef CONFIG_USB_ETH_RNDIS
+	if (dev->rndis)
+		size += sizeof (struct rndis_packet_msg_type);
+#endif	
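+	/* with the maxpacket-1 padding above, this yields whole packets */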
+	size -= size % dev->out_ep->maxpacket;
+
+	if ((skb = alloc_skb (size + NET_IP_ALIGN, gfp_flags)) == 0) {
+		DEBUG (dev, "no rx skb\n");
+		goto enomem;
+	}
+	
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.  Note:
+	 * this doesn't account for variable-sized RNDIS headers.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	req->buf = skb->data;
+	req->length = size;
+	req->complete = rx_complete;
+	req->context = skb;
+
+	retval = usb_ep_queue (dev->out_ep, req, gfp_flags);
+	if (retval == -ENOMEM)
+enomem:
+		defer_kevent (dev, WORK_RX_MEMORY);
+	if (retval) {
+		DEBUG (dev, "rx submit --> %d\n", retval);
+		dev_kfree_skb_any (skb);
+		spin_lock (&dev->lock);
+		list_add (&req->list, &dev->rx_reqs);
+		spin_unlock (&dev->lock);
+	}
+	return retval;
+}
+
+static void rx_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = ep->driver_data;
+	int		status = req->status;
+
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put (skb, req->actual);
+#ifdef CONFIG_USB_ETH_RNDIS
+		/* we know MaxPacketsPerTransfer == 1 here */
+		if (dev->rndis)
+			rndis_rm_hdr (req->buf, &(skb->len));
+#endif
+		if (ETH_HLEN > skb->len || skb->len > ETH_FRAME_LEN) {
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			DEBUG (dev, "rx length %d\n", skb->len);
+			break;
+		}
+
+		skb->dev = dev->net;
+		skb->protocol = eth_type_trans (skb, dev->net);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+
+		/* no buffer copies needed, unless hardware can't
+		 * use skb buffers.
+		 */
+		status = netif_rx (skb);
+		skb = NULL;
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		// unlink
+	case -ESHUTDOWN:		// disconnect etc
+		VDEBUG (dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		// endpoint reset
+		DEBUG (dev, "rx %s reset\n", ep->name);
+		defer_kevent (dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any (skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->stats.rx_over_errors++;
+		// FALLTHROUGH
+	    
+	default:
+		dev->stats.rx_errors++;
+		DEBUG (dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any (skb);
+	if (!netif_running (dev->net)) {
+clean:
+		/* nobody reading rx_reqs, so no dev->lock */
+		list_add (&req->list, &dev->rx_reqs);
+		req = NULL;
+	}
+	if (req)
+		rx_submit (dev, req, GFP_ATOMIC);
+}
+
+static int prealloc (struct list_head *list, struct usb_ep *ep,
+			unsigned n, int gfp_flags)
+{
+	unsigned		i;
+	struct usb_request	*req;
+
+	if (!n)
+		return -ENOMEM;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry (req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
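+	/* i is now how many more requests are needed to reach n */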
+	while (i--) {
+		req = usb_ep_alloc_request (ep, gfp_flags);
+		if (!req)
+			return list_empty (list) ? -ENOMEM : 0;
+		list_add (&req->list, list);
+	}
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del (&req->list);
+		usb_ep_free_request (ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of (next, struct usb_request, list);
+	}
+	return 0;
+}
+
+static int alloc_requests (struct eth_dev *dev, unsigned n, int gfp_flags)
+{
+	int status;
+
+	status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
+	if (status < 0)
+		goto fail;
+	status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
+	if (status < 0)
+		goto fail;
+	return 0;
+fail:
+	DEBUG (dev, "can't alloc requests\n");
+	return status;
+}
+
+static void rx_fill (struct eth_dev *dev, int gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+
+	clear_bit (WORK_RX_MEMORY, &dev->todo);
+
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave (&dev->lock, flags);
+	while (!list_empty (&dev->rx_reqs)) {
+		req = container_of (dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init (&req->list);
+		spin_unlock_irqrestore (&dev->lock, flags);
+
+		if (rx_submit (dev, req, gfp_flags) < 0) {
+			defer_kevent (dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave (&dev->lock, flags);
+	}
+	spin_unlock_irqrestore (&dev->lock, flags);
+}
+
+static void eth_work (void *_dev)
+{
+	struct eth_dev		*dev = _dev;
+
+	if (test_bit (WORK_RX_MEMORY, &dev->todo)) {
+		if (netif_running (dev->net))
+			rx_fill (dev, GFP_KERNEL);
+		else
+			clear_bit (WORK_RX_MEMORY, &dev->todo);
+	}
+
+	if (dev->todo)
+		DEBUG (dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
+static void tx_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = ep->driver_data;
+
+	switch (req->status) {
+	default:
+		dev->stats.tx_errors++;
+		VDEBUG (dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		// unlink
+	case -ESHUTDOWN:		// disconnect etc
+		break;
+	case 0:
+		dev->stats.tx_bytes += skb->len;
+	}
+	dev->stats.tx_packets++;
+
+	spin_lock (&dev->lock);
+	list_add (&req->list, &dev->tx_reqs);
+	spin_unlock (&dev->lock);
+	dev_kfree_skb_any (skb);
+
+	atomic_dec (&dev->tx_qlen);
+	if (netif_carrier_ok (dev->net))
+		netif_wake_queue (dev->net);
+}
+
+static inline int eth_is_promisc (struct eth_dev *dev)
+{
+	/* no filters for the CDC subset; always promisc */
+	if (subset_active (dev))
+		return 1;
+	return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			length = skb->len;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+
+	/* apply outgoing CDC or RNDIS filters */
+	if (!eth_is_promisc (dev)) {
+		u8		*dest = skb->data;
+
+		if (dest [0] & 0x01) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (memcmp (dest, net->broadcast, ETH_ALEN) == 0)
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(dev->cdc_filter & type)) {
+				dev_kfree_skb_any (skb);
+				return 0;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave (&dev->lock, flags);
+	req = container_of (dev->tx_reqs.next, struct usb_request, list);
+	list_del (&req->list);
+	if (list_empty (&dev->tx_reqs))
+		netif_stop_queue (net);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* no buffer copies are needed, unless the network stack did one,
+	 * the hardware can't use skb buffers, or there's not enough
+	 * space for any RNDIS headers we need.
+	 */
+#ifdef CONFIG_USB_ETH_RNDIS
+	if (dev->rndis) {
+		struct sk_buff	*skb_rndis;
+
+		skb_rndis = skb_realloc_headroom (skb,
+				sizeof (struct rndis_packet_msg_type));
+		if (!skb_rndis)
+			goto drop;
+	
+		dev_kfree_skb_any (skb);
+		skb = skb_rndis;
+		rndis_add_hdr (skb);
+		length = skb->len;
+	}
+#endif
+	req->buf = skb->data;
+	req->context = skb;
+	req->complete = tx_complete;
+
+	/* use zlp framing on tx for strict CDC-Ether conformance, even
+	 * though any robust network rx path ignores extra padding, and
+	 * some hardware doesn't like to write zlps.
+	 */
+	req->zero = 1;
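+	/* if the UDC can't write zlps, pad by one byte instead; the host's
+	 * network stack ignores the extra byte (see the comment above)
+	 */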
+	if (!dev->zlp && (length % dev->in_ep->maxpacket) == 0)
+		length++;
+
+	req->length = length;
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+	/* throttle highspeed IRQ rate back slightly */
+	req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
+		? ((atomic_read (&dev->tx_qlen) % TX_DELAY) != 0)
+		: 0;
+#endif
+
+	retval = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
+	switch (retval) {
+	default:
+		DEBUG (dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+		net->trans_start = jiffies;
+		atomic_inc (&dev->tx_qlen);
+	}
+
+	if (retval) {
+#ifdef CONFIG_USB_ETH_RNDIS
+drop:
+#endif
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any (skb);
+		spin_lock_irqsave (&dev->lock, flags);
+		if (list_empty (&dev->tx_reqs))
+			netif_start_queue (net);
+		list_add (&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore (&dev->lock, flags);
+	}
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_ETH_RNDIS
+
+static void rndis_send_media_state (struct eth_dev *dev, int connect)
+{
+	if (!dev)
+		return;
+	
+	if (connect) {
+		if (rndis_signal_connect (dev->rndis_config))
+			return;
+	} else {
+		if (rndis_signal_disconnect (dev->rndis_config))
+			return;
+	}
+}
+
+static void
+rndis_control_ack_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length)
+		DEBUG ((struct eth_dev *) ep->driver_data,
+			"rndis control ack complete --> %d, %d/%d\n",
+			req->status, req->actual, req->length);
+
+	usb_ep_free_buffer(ep, req->buf, req->dma, 8);
+	usb_ep_free_request(ep, req);
+}
+
+static int rndis_control_ack (struct net_device *net)
+{
+	struct eth_dev          *dev = netdev_priv(net);
+	int                     length;
+	struct usb_request      *resp;
+	
+	/* in case RNDIS calls this after disconnect */
+	if (!dev->status_ep) {
+		DEBUG (dev, "status ENODEV\n");
+		return -ENODEV;
+	}
+
+	/* Allocate memory for the notification, i.e. the ACK */
+	resp = usb_ep_alloc_request (dev->status_ep, GFP_ATOMIC);
+	if (!resp) {
+		DEBUG (dev, "status ENOMEM\n");
+		return -ENOMEM;
+	}
+	
+	resp->buf = usb_ep_alloc_buffer (dev->status_ep, 8,
+					 &resp->dma, GFP_ATOMIC);
+	if (!resp->buf) {
+		DEBUG (dev, "status buf ENOMEM\n");
+		usb_ep_free_request (dev->status_ep, resp);
+		return -ENOMEM;
+	}
+	
+	/* Send RNDIS RESPONSE_AVAILABLE notification;
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE should work too
+	 */
+	resp->length = 8;
+	resp->complete = rndis_control_ack_complete;
+	
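+	/* two le32 words:  RESPONSE_AVAILABLE (0x00000001), then reserved zero */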
+	*((__le32 *) resp->buf) = __constant_cpu_to_le32 (1);
+	*((__le32 *) resp->buf + 1) = __constant_cpu_to_le32 (0);
+	
+	length = usb_ep_queue (dev->status_ep, resp, GFP_ATOMIC);
+	if (length < 0) {
+		resp->status = 0;
+		rndis_control_ack_complete (dev->status_ep, resp);
+	}
+	
+	return 0;
+}
+
+#endif	/* RNDIS */
+
+static void eth_start (struct eth_dev *dev, int gfp_flags)
+{
+	DEBUG (dev, "%s\n", __FUNCTION__);
+
+	/* fill the rx queue */
+	rx_fill (dev, gfp_flags);
+
+	/* and open the tx floodgates */ 
+	atomic_set (&dev->tx_qlen, 0);
+	netif_wake_queue (dev->net);
+#ifdef CONFIG_USB_ETH_RNDIS
+	if (dev->rndis) {
+		rndis_set_param_medium (dev->rndis_config,
+					NDIS_MEDIUM_802_3,
+					BITRATE(dev->gadget));
+		rndis_send_media_state (dev, 1);
+	}
+#endif	
+}
+
+static int eth_open (struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	DEBUG (dev, "%s\n", __FUNCTION__);
+	if (netif_carrier_ok (dev->net))
+		eth_start (dev, GFP_KERNEL);
+	return 0;
+}
+
+static int eth_stop (struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	VDEBUG (dev, "%s\n", __FUNCTION__);
+	netif_stop_queue (net);
+
+	DEBUG (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->stats.rx_packets, dev->stats.tx_packets, 
+		dev->stats.rx_errors, dev->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	if (dev->config) {
+		usb_ep_disable (dev->in_ep);
+		usb_ep_disable (dev->out_ep);
+		if (netif_carrier_ok (dev->net)) {
+			DEBUG (dev, "host still using in/out endpoints\n");
+			// FIXME idiom may leave toggle wrong here
+			usb_ep_enable (dev->in_ep, dev->in);
+			usb_ep_enable (dev->out_ep, dev->out);
+		}
+		if (dev->status_ep) {
+			usb_ep_disable (dev->status_ep);
+			usb_ep_enable (dev->status_ep, dev->status);
+		}
+	}
+	
+#ifdef	CONFIG_USB_ETH_RNDIS
+	if (dev->rndis) {
+		rndis_set_param_medium (dev->rndis_config,
+					NDIS_MEDIUM_802_3, 0);
+		rndis_send_media_state (dev, 0);
+	}
+#endif
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *eth_req_alloc (struct usb_ep *ep, unsigned size)
+{
+	struct usb_request	*req;
+
+	req = usb_ep_alloc_request (ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc (size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request (ep, req);
+		req = NULL;
+	}
+	return req;
+}
+
+static void
+eth_req_free (struct usb_ep *ep, struct usb_request *req)
+{
+	kfree (req->buf);
+	usb_ep_free_request (ep, req);
+}
+
+
+static void
+eth_unbind (struct usb_gadget *gadget)
+{
+	struct eth_dev		*dev = get_gadget_data (gadget);
+
+	DEBUG (dev, "unbind\n");
+#ifdef CONFIG_USB_ETH_RNDIS
+	rndis_deregister (dev->rndis_config);
+	rndis_exit ();
+#endif
+
+	/* we've already been disconnected ... no i/o is active */
+	if (dev->req) {
+		eth_req_free (gadget->ep0, dev->req);
+		dev->req = NULL;
+	}
+	if (dev->stat_req) {
+		eth_req_free (dev->status_ep, dev->stat_req);
+		dev->stat_req = NULL;
+	}
+
+	unregister_netdev (dev->net);
+	free_netdev(dev->net);
+
+	/* assuming we used keventd, it must quiesce too */
+	flush_scheduled_work ();
+	set_gadget_data (gadget, NULL);
+}
+
+static u8 __init nibble (unsigned char c)
+{
+	if (likely (isdigit (c)))
+		return c - '0';
+	c = toupper (c);
+	if (likely (isxdigit (c)))
+		return 10 + c - 'A';
+	return 0;
+}
+
+static void __init get_ether_addr (const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if((*str == '.') || (*str == ':'))
+				str++;
+			num = nibble(*str++) << 4;
+			num |= (nibble(*str++));
+			dev_addr [i] = num;
+		}
+		if (is_valid_ether_addr (dev_addr))
+			return;
+	}
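+	/* else fall back to a random, locally administered unicast address */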
+	random_ether_addr(dev_addr);
+}
+
+static int __init
+eth_bind (struct usb_gadget *gadget)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	u8			cdc = 1, zlp = 1, rndis = 1;
+	struct usb_ep		*in_ep, *out_ep, *status_ep = NULL;
+	int			status = -ENOMEM;
+
+	/* these flags are only ever cleared; compiler take note */
+#ifndef	DEV_CONFIG_CDC
+	cdc = 0;
+#endif
+#ifndef	CONFIG_USB_ETH_RNDIS
+	rndis = 0;
+#endif
+
+	/* Because most host side USB stacks handle CDC Ethernet, that
+	 * standard protocol is _strongly_ preferred for interop purposes.
+	 * (By everyone except Microsoft.)
+	 */
+	if (gadget_is_net2280 (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
+	} else if (gadget_is_dummy (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0202);
+	} else if (gadget_is_pxa (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0203);
+		/* pxa doesn't support altsettings */
+		cdc = 0;
+	} else if (gadget_is_sh(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0204);
+		/* sh doesn't support multiple interfaces or configs */
+		cdc = 0;
+		rndis = 0;
+	} else if (gadget_is_sa1100 (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0205);
+		/* hardware can't write zlps */
+		zlp = 0;
+		/* sa1100 CAN do CDC, without status endpoint ... we use
+		 * non-CDC to be compatible with ARM Linux-2.4 "usb-eth".
+		 */
+		cdc = 0;
+	} else if (gadget_is_goku (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0206);
+	} else if (gadget_is_mq11xx (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0207);
+	} else if (gadget_is_omap (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
+	} else if (gadget_is_lh7a40x(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
+	} else if (gadget_is_n9604(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0210);
+	} else if (gadget_is_pxa27x(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0211);
+ 	} else if (gadget_is_s3c2410(gadget)) {
+ 		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0212);
+	} else if (gadget_is_at91(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0213);
+	} else {
+		/* can't assume CDC works.  don't want to default to
+		 * anything less functional on CDC-capable hardware,
+		 * so we fail in this case.
+		 */
+		dev_err (&gadget->dev,
+			"controller '%s' not recognized\n",
+			gadget->name);
+		return -ENODEV;
+	}
+	snprintf (manufacturer, sizeof manufacturer, "%s %s/%s",
+		system_utsname.sysname, system_utsname.release,
+		gadget->name);
+
+	/* If there's an RNDIS configuration, that's what Windows wants to
+	 * be using ... so use these product IDs here and in the "linux.inf"
+	 * needed to install MSFT drivers.  Current Linux kernels will use
+	 * the second configuration if it's CDC Ethernet, and need some help
+	 * to choose the right configuration otherwise.
+	 */
+	if (rndis) {
+		device_desc.idVendor =
+			__constant_cpu_to_le16(RNDIS_VENDOR_NUM);
+		device_desc.idProduct =
+			__constant_cpu_to_le16(RNDIS_PRODUCT_NUM);
+		snprintf (product_desc, sizeof product_desc,
+			"RNDIS/%s", driver_desc);
+
+	/* CDC subset ... recognized by Linux since 2.4.10, but Windows
+	 * drivers aren't widely available.
+	 */
+	} else if (!cdc) {
+		device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
+		device_desc.idVendor =
+			__constant_cpu_to_le16(SIMPLE_VENDOR_NUM);
+		device_desc.idProduct =
+			__constant_cpu_to_le16(SIMPLE_PRODUCT_NUM);
+	}
+
+	/* support optional vendor/distro customization */
+	if (idVendor) {
+		if (!idProduct) {
+			dev_err (&gadget->dev, "idVendor needs idProduct!\n");
+			return -ENODEV;
+		}
+		device_desc.idVendor = cpu_to_le16(idVendor);
+		device_desc.idProduct = cpu_to_le16(idProduct);
+		if (bcdDevice)
+			device_desc.bcdDevice = cpu_to_le16(bcdDevice);
+	}
+	if (iManufacturer)
+		strlcpy (manufacturer, iManufacturer, sizeof manufacturer);
+	if (iProduct)
+		strlcpy (product_desc, iProduct, sizeof product_desc);
+
+	/* all we really need is bulk IN/OUT */
+	usb_ep_autoconfig_reset (gadget);
+	in_ep = usb_ep_autoconfig (gadget, &fs_source_desc);
+	if (!in_ep) {
+autoconf_fail:
+		dev_err (&gadget->dev,
+			"can't autoconfigure on %s\n",
+			gadget->name);
+		return -ENODEV;
+	}
+	EP_IN_NAME = in_ep->name;
+	in_ep->driver_data = in_ep;	/* claim */
+	
+	out_ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
+	if (!out_ep)
+		goto autoconf_fail;
+	EP_OUT_NAME = out_ep->name;
+	out_ep->driver_data = out_ep;	/* claim */
+
+#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
+	/* CDC Ethernet control interface doesn't require a status endpoint.
+	 * Since some hosts expect one, try to allocate one anyway.
+	 */
+	if (cdc || rndis) {
+		status_ep = usb_ep_autoconfig (gadget, &fs_status_desc);
+		if (status_ep) {
+			EP_STATUS_NAME = status_ep->name;
+			status_ep->driver_data = status_ep;	/* claim */
+		} else if (rndis) {
+			dev_err (&gadget->dev,
+				"can't run RNDIS on %s\n",
+				gadget->name);
+			return -ENODEV;
+#ifdef DEV_CONFIG_CDC
+		/* pxa25x only does CDC subset; often used with RNDIS */
+		} else if (cdc) {
+			control_intf.bNumEndpoints = 0;
+			/* FIXME remove endpoint from descriptor list */
+#endif
+		}
+	}
+#endif
+
+	/* one config:  cdc, else minimal subset */
+	if (!cdc) {
+		eth_config.bNumInterfaces = 1;
+		eth_config.iConfiguration = STRING_SUBSET;
+		fs_subset_descriptors();
+		hs_subset_descriptors();
+	}
+
+	/* For now RNDIS is always a second config */
+	if (rndis)
+		device_desc.bNumConfigurations = 2;
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+	if (rndis)
+		dev_qualifier.bNumConfigurations = 2;
+	else if (!cdc)
+		dev_qualifier.bDeviceClass = USB_CLASS_VENDOR_SPEC;
+
+	/* assumes ep0 uses the same value for both speeds ... */
+	dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
+
+	/* and that all endpoints are dual-speed */
+	hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
+	hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
+#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
+	if (EP_STATUS_NAME)
+		hs_status_desc.bEndpointAddress =
+				fs_status_desc.bEndpointAddress;
+#endif
+#endif	/* DUALSPEED */
+
+	device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
+	usb_gadget_set_selfpowered (gadget);
+
+	if (gadget->is_otg) {
+		otg_descriptor.bmAttributes |= USB_OTG_HNP;
+		eth_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+		eth_config.bMaxPower = 4;
+#ifdef	CONFIG_USB_ETH_RNDIS
+		rndis_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+		rndis_config.bMaxPower = 4;
+#endif
+	}
+
+ 	net = alloc_etherdev (sizeof *dev);
+ 	if (!net)
+		return status;
+	dev = netdev_priv(net);
+	spin_lock_init (&dev->lock);
+	INIT_WORK (&dev->work, eth_work, dev);
+	INIT_LIST_HEAD (&dev->tx_reqs);
+	INIT_LIST_HEAD (&dev->rx_reqs);
+
+	/* network device setup */
+	dev->net = net;
+	SET_MODULE_OWNER (net);
+	strcpy (net->name, "usb%d");
+	dev->cdc = cdc;
+	dev->zlp = zlp;
+
+	dev->in_ep = in_ep;
+	dev->out_ep = out_ep;
+	dev->status_ep = status_ep;
+
+	/* Module params for these addresses should come from ID proms.
+	 * The host side address is used with CDC and RNDIS, and commonly
+	 * ends up in a persistent config database.
+	 */
+	get_ether_addr(dev_addr, net->dev_addr);
+	if (cdc || rndis) {
+		get_ether_addr(host_addr, dev->host_mac);
+#ifdef	DEV_CONFIG_CDC
+		snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
+			dev->host_mac [0], dev->host_mac [1],
+			dev->host_mac [2], dev->host_mac [3],
+			dev->host_mac [4], dev->host_mac [5]);
+#endif
+	}
+
+	if (rndis) {
+		status = rndis_init();
+		if (status < 0) {
+			dev_err (&gadget->dev, "can't init RNDIS, %d\n",
+				status);
+			goto fail;
+		}
+	}
+
+	net->change_mtu = eth_change_mtu;
+	net->get_stats = eth_get_stats;
+	net->hard_start_xmit = eth_start_xmit;
+	net->open = eth_open;
+	net->stop = eth_stop;
+	// watchdog_timeo, tx_timeout ...
+	// set_multicast_list
+	SET_ETHTOOL_OPS(net, &ops);
+
+	/* preallocate control message data and buffer */
+	dev->req = eth_req_alloc (gadget->ep0, USB_BUFSIZ);
+	if (!dev->req)
+		goto fail;
+	dev->req->complete = eth_setup_complete;
+
+	/* ... and maybe likewise for status transfer */
+	if (dev->status_ep) {
+		dev->stat_req = eth_req_alloc (dev->status_ep,
+					STATUS_BYTECOUNT);
+		if (!dev->stat_req) {
+			eth_req_free (gadget->ep0, dev->req);
+			goto fail;
+		}
+	}
+
+	/* finish hookup to lower layer ... */
+	dev->gadget = gadget;
+	set_gadget_data (gadget, dev);
+	gadget->ep0->driver_data = dev;
+	
+	/* two kinds of host-initiated state changes:
+	 *  - iff DATA transfer is active, carrier is "on"
+	 *  - tx queueing enabled if open *and* carrier is "on"
+	 */
+	netif_stop_queue (dev->net);
+	netif_carrier_off (dev->net);
+
+ 	// SET_NETDEV_DEV (dev->net, &gadget->dev);
+ 	status = register_netdev (dev->net);
+	if (status < 0)
+		goto fail1;
+
+	INFO (dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
+	INFO (dev, "using %s, OUT %s IN %s%s%s\n", gadget->name,
+		EP_OUT_NAME, EP_IN_NAME,
+		EP_STATUS_NAME ? " STATUS " : "",
+		EP_STATUS_NAME ? EP_STATUS_NAME : ""
+		);
+	INFO (dev, "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		net->dev_addr [0], net->dev_addr [1],
+		net->dev_addr [2], net->dev_addr [3],
+		net->dev_addr [4], net->dev_addr [5]);
+
+	if (cdc || rndis)
+		INFO (dev, "HOST MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+			dev->host_mac [0], dev->host_mac [1],
+			dev->host_mac [2], dev->host_mac [3],
+			dev->host_mac [4], dev->host_mac [5]);
+
+#ifdef	CONFIG_USB_ETH_RNDIS
+	if (rndis) {
+		u32	vendorID = 0;
+
+		/* FIXME RNDIS vendor id == "vendor NIC code" == ? */
+		
+		dev->rndis_config = rndis_register (rndis_control_ack);
+		if (dev->rndis_config < 0) {
+fail0:
+			unregister_netdev (dev->net);
+			status = -ENODEV;
+			goto fail;
+		}
+		
+		/* these set up a lot of the OIDs that RNDIS needs */
+		rndis_set_host_mac (dev->rndis_config, dev->host_mac);
+		if (rndis_set_param_dev (dev->rndis_config, dev->net,
+					 &dev->stats))
+			goto fail0;
+		if (rndis_set_param_vendor (dev->rndis_config, vendorID,
+					    manufacturer))
+			goto fail0;
+		if (rndis_set_param_medium (dev->rndis_config,
+					    NDIS_MEDIUM_802_3,
+					    0))
+			goto fail0;
+		INFO (dev, "RNDIS ready\n");
+	}
+#endif	
+
+	return status;
+
+fail1:
+	dev_dbg(&gadget->dev, "register_netdev failed, %d\n", status);
+fail:
+	eth_unbind (gadget);
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+eth_suspend (struct usb_gadget *gadget)
+{
+	struct eth_dev		*dev = get_gadget_data (gadget);
+
+	DEBUG (dev, "suspend\n");
+	dev->suspended = 1;
+}
+
+static void
+eth_resume (struct usb_gadget *gadget)
+{
+	struct eth_dev		*dev = get_gadget_data (gadget);
+
+	DEBUG (dev, "resume\n");
+	dev->suspended = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver eth_driver = {
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	.speed		= USB_SPEED_HIGH,
+#else
+	.speed		= USB_SPEED_FULL,
+#endif
+	.function	= (char *) driver_desc,
+	.bind		= eth_bind,
+	.unbind		= eth_unbind,
+
+	.setup		= eth_setup,
+	.disconnect	= eth_disconnect,
+
+	.suspend	= eth_suspend,
+	.resume		= eth_resume,
+
+	.driver 	= {
+		.name		= (char *) shortname,
+		// .shutdown = ...
+		// .suspend = ...
+		// .resume = ...
+	},
+};
+
+MODULE_DESCRIPTION (DRIVER_DESC);
+MODULE_AUTHOR ("David Brownell, Benedikt Spanger");
+MODULE_LICENSE ("GPL");
+
+
+static int __init init (void)
+{
+	return usb_gadget_register_driver (&eth_driver);
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	usb_gadget_unregister_driver (&eth_driver);
+}
+module_exit (cleanup);
+
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
new file mode 100644
index 0000000..4857f0e
--- /dev/null
+++ b/drivers/usb/gadget/file_storage.c
@@ -0,0 +1,4139 @@
+/*
+ * file_storage.c -- File-backed USB Storage Gadget, for USB development
+ *
+ * Copyright (C) 2003-2005 Alan Stern
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * The File-backed Storage Gadget acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive.  In addition to providing an
+ * example of a genuinely useful gadget driver for a USB device, it also
+ * illustrates a technique of double-buffering for increased throughput.
+ * Last but not least, it gives an easy way to probe the behavior of the
+ * Mass Storage drivers in a USB host.
+ *
+ * Backing storage is provided by a regular file or a block device, specified
+ * by the "file" module parameter.  Access can be limited to read-only by
+ * setting the optional "ro" module parameter.  The gadget will indicate that
+ * it has removable media if the optional "removable" module parameter is set.
+ *
+ * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
+ * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
+ * by the optional "transport" module parameter.  It also supports the
+ * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
+ * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
+ * the optional "protocol" module parameter.  In addition, the default
+ * Vendor ID, Product ID, and release number can be overridden.
+ *
+ * There is support for multiple logical units (LUNs), each of which has
+ * its own backing file.  The number of LUNs can be set using the optional
+ * "luns" module parameter (anywhere from 1 to 8), and the corresponding
+ * files are specified using comma-separated lists for "file" and "ro".
+ * The default number of LUNs is taken from the number of "file" elements;
+ * it is 1 if "file" is not given.  If "removable" is not set then a backing
+ * file must be specified for each LUN.  If it is set, then an unspecified
+ * or empty backing filename means the LUN's medium is not loaded.
+ *
+ * Requirements are modest; only a bulk-in and a bulk-out endpoint are
+ * needed (an interrupt-in endpoint is also needed for CBI).  The memory
+ * requirement amounts to two 16K buffers, size configurable by a parameter.
+ * Support is included for both full-speed and high-speed operation.
+ *
+ * Module options:
+ *
+ *	file=filename[,filename...]
+ *				Required if "removable" is not set, names of
+ *					the files or block devices used for
+ *					backing storage
+ *	ro=b[,b...]		Default false, booleans for read-only access
+ *	removable		Default false, boolean for removable media
+ *	luns=N			Default N = number of filenames, number of
+ *					LUNs to support
+ *	transport=XXX		Default BBB, transport name (CB, CBI, or BBB)
+ *	protocol=YYY		Default SCSI, protocol name (RBC, 8020 or
+ *					ATAPI, QIC, UFI, 8070, or SCSI;
+ *					also 1 - 6)
+ *	vendor=0xVVVV		Default 0x0525 (NetChip), USB Vendor ID
+ *	product=0xPPPP		Default 0xa4a5 (FSG), USB Product ID
+ *	release=0xRRRR		Override the USB release number (bcdDevice)
+ *	buflen=N		Default N=16384, buffer size used (will be
+ *					rounded down to a multiple of
+ *					PAGE_CACHE_SIZE)
+ *	stall			Default determined according to the type of
+ *					USB device controller (usually true),
+ *					boolean to permit the driver to halt
+ *					bulk endpoints
+ *
+ * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro",
+ * "removable", and "luns" options are available; default values are used
+ * for everything else.
+ *
+ * The pathnames of the backing files and the ro settings are available in
+ * the attribute files "file" and "ro" in the lun<n> subdirectory of the
+ * gadget's sysfs directory.  If the "removable" option is set, writing to
+ * these files will simulate ejecting/loading the medium (writing an empty
+ * line means eject) and adjusting a write-enable tab.  Changes to the ro
+ * setting are not allowed when the medium is loaded.
+ *
+ * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
+ */
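+
+/*
+ * Example usage (illustrative only; the backing file path and size are
+ * placeholders, not requirements of this driver).  Using just the module
+ * options documented above, a target system might set up a 64 MB backing
+ * file and load the gadget roughly like this:
+ *
+ *	dd if=/dev/zero of=/root/backing.img bs=1M count=64
+ *	modprobe g_file_storage file=/root/backing.img removable=1
+ *
+ * With CONFIG_USB_FILE_STORAGE_TEST enabled, the transport and protocol
+ * can be overridden as well, e.g. "transport=CBI protocol=UFI".
+ */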
+
+
+/*
+ *				Driver Design
+ *
+ * The FSG driver is fairly straightforward.  There is a main kernel
+ * thread that handles most of the work.  Interrupt routines field
+ * callbacks from the controller driver: bulk- and interrupt-request
+ * completion notifications, endpoint-0 events, and disconnect events.
+ * Completion events are passed to the main thread by wakeup calls.  Many
+ * ep0 requests are handled at interrupt time, but SetInterface,
+ * SetConfiguration, and device reset requests are forwarded to the
+ * thread in the form of "exceptions" using SIGUSR1 signals (since they
+ * should interrupt any ongoing file I/O operations).
+ *
+ * The thread's main routine implements the standard command/data/status
+ * parts of a SCSI interaction.  It and its subroutines are full of tests
+ * for pending signals/exceptions -- all this polling is necessary since
+ * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
+ * indication that the driver really wants to be running in userspace.)
+ * An important point is that so long as the thread is alive it keeps an
+ * open reference to the backing file.  This will prevent unmounting
+ * the backing file's underlying filesystem and could cause problems
+ * during system shutdown, for example.  To prevent such problems, the
+ * thread catches INT, TERM, and KILL signals and converts them into
+ * an EXIT exception.
+ *
+ * In normal operation the main thread is started during the gadget's
+ * fsg_bind() callback and stopped during fsg_unbind().  But it can also
+ * exit when it receives a signal, and there's no point leaving the
+ * gadget running when the thread is dead.  So just before the thread
+ * exits, it deregisters the gadget driver.  This makes things a little
+ * tricky: The driver is deregistered at two places, and the exiting
+ * thread can indirectly call fsg_unbind() which in turn can tell the
+ * thread to exit.  The first problem is resolved through the use of the
+ * REGISTERED atomic bitflag; the driver will only be deregistered once.
+ * The second problem is resolved by having fsg_unbind() check
+ * fsg->state; it won't try to stop the thread if the state is already
+ * FSG_STATE_TERMINATED.
+ *
+ * To provide maximum throughput, the driver uses a circular pipeline of
+ * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
+ * arbitrarily long; in practice the benefits don't justify having more
+ * than 2 stages (i.e., double buffering).  But it helps to think of the
+ * pipeline as being a long one.  Each buffer head contains a bulk-in and
+ * a bulk-out request pointer (since the buffer can be used for both
+ * output and input -- directions always are given from the host's
+ * point of view) as well as a pointer to the buffer and various state
+ * variables.
+ *
+ * Use of the pipeline follows a simple protocol.  There is a variable
+ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
+ * At any time that buffer head may still be in use from an earlier
+ * request, so each buffer head has a state variable indicating whether
+ * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
+ * buffer head to be EMPTY, filling the buffer either by file I/O or by
+ * USB I/O (during which the buffer head is BUSY), and marking the buffer
+ * head FULL when the I/O is complete.  Then the buffer will be emptied
+ * (again possibly by USB I/O, during which it is marked BUSY) and
+ * finally marked EMPTY again (possibly by a completion routine).
+ *
+ * A module parameter tells the driver to avoid stalling the bulk
+ * endpoints wherever the transport specification allows.  This is
+ * necessary for some UDCs like the SuperH, which cannot reliably clear a
+ * halt on a bulk endpoint.  However, under certain circumstances the
+ * Bulk-only specification requires a stall.  In such cases the driver
+ * will halt the endpoint and set a flag indicating that it should clear
+ * the halt in software during the next device reset.  Hopefully this
+ * will permit everything to work correctly.  Furthermore, although the
+ * specification allows the bulk-out endpoint to halt when the host sends
+ * too much data, implementing this would cause an unavoidable race.
+ * The driver will always use the "no-stall" approach for OUT transfers.
+ *
+ * One subtle point concerns sending status-stage responses for ep0
+ * requests.  Some of these requests, such as device reset, can involve
+ * interrupting an ongoing file I/O operation, which might take an
+ * arbitrarily long time.  During that delay the host might give up on
+ * the original ep0 request and issue a new one.  When that happens the
+ * driver should not notify the host about completion of the original
+ * request, as the host will no longer be waiting for it.  So the driver
+ * assigns to each ep0 request a unique tag, and it keeps track of the
+ * tag value of the request associated with a long-running exception
+ * (device-reset, interface-change, or configuration-change).  When the
+ * exception handler is finished, the status-stage response is submitted
+ * only if the current ep0 request tag is equal to the exception request
+ * tag.  Thus only the most recently received ep0 request will get a
+ * status-stage response.
+ *
+ * Warning: This driver source file is too long.  It ought to be split up
+ * into a header file plus about 3 separate .c files, to handle the details
+ * of the Gadget, USB Mass Storage, and SCSI protocols.
+ */
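+
+/*
+ * Buffer head life cycle, summarizing the pipeline protocol sketched above:
+ *
+ *	EMPTY --(start fill: file or USB I/O)--> BUSY --(fill done)--> FULL
+ *	FULL  --(start emptying the buffer)----> BUSY --(drained)----> EMPTY
+ */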
+
+
+#undef DEBUG
+#undef VERBOSE
+#undef DUMP_MSGS
+
+#include <linux/config.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/suspend.h>
+#include <linux/utsname.h>
+#include <linux/wait.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include "gadget_chips.h"
+
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_DESC		"File-backed Storage Gadget"
+#define DRIVER_NAME		"g_file_storage"
+#define DRIVER_VERSION		"20 October 2004"
+
+static const char longname[] = DRIVER_DESC;
+static const char shortname[] = DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with any other driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures. */
+#define DRIVER_VENDOR_ID	0x0525	// NetChip
+#define DRIVER_PRODUCT_ID	0xa4a5	// Linux-USB File-backed Storage Gadget
+
+
+/*
+ * This driver assumes self-powered hardware and has no way for users to
+ * trigger remote wakeup.  It uses autoconfiguration to select endpoints
+ * and endpoint addresses.
+ */
+
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(f,level,fmt,args...) \
+	dev_printk(level , &(f)->gadget->dev , fmt , ## args)
+#define yprintk(l,level,fmt,args...) \
+	dev_printk(level , &(l)->dev , fmt , ## args)
+
+#ifdef DEBUG
+#define DBG(fsg,fmt,args...) \
+	xprintk(fsg , KERN_DEBUG , fmt , ## args)
+#define LDBG(lun,fmt,args...) \
+	yprintk(lun , KERN_DEBUG , fmt , ## args)
+#define MDBG(fmt,args...) \
+	printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
+#else
+#define DBG(fsg,fmt,args...) \
+	do { } while (0)
+#define LDBG(lun,fmt,args...) \
+	do { } while (0)
+#define MDBG(fmt,args...) \
+	do { } while (0)
+#undef VERBOSE
+#undef DUMP_MSGS
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDBG	DBG
+#define VLDBG	LDBG
+#else
+#define VDBG(fsg,fmt,args...) \
+	do { } while (0)
+#define VLDBG(lun,fmt,args...) \
+	do { } while (0)
+#endif /* VERBOSE */
+
+#define ERROR(fsg,fmt,args...) \
+	xprintk(fsg , KERN_ERR , fmt , ## args)
+#define LERROR(lun,fmt,args...) \
+	yprintk(lun , KERN_ERR , fmt , ## args)
+
+#define WARN(fsg,fmt,args...) \
+	xprintk(fsg , KERN_WARNING , fmt , ## args)
+#define LWARN(lun,fmt,args...) \
+	yprintk(lun , KERN_WARNING , fmt , ## args)
+
+#define INFO(fsg,fmt,args...) \
+	xprintk(fsg , KERN_INFO , fmt , ## args)
+#define LINFO(lun,fmt,args...) \
+	yprintk(lun , KERN_INFO , fmt , ## args)
+
+#define MINFO(fmt,args...) \
+	printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Encapsulate the module parameter settings */
+
+#define MAX_LUNS	8
+
+	/* Arggh!  There should be a module_param_array_named macro! */
+static char		*file[MAX_LUNS] = {NULL, };
+static int		ro[MAX_LUNS] = {0, };
+
+static struct {
+	int		num_filenames;
+	int		num_ros;
+	unsigned int	nluns;
+
+	char		*transport_parm;
+	char		*protocol_parm;
+	int		removable;
+	unsigned short	vendor;
+	unsigned short	product;
+	unsigned short	release;
+	unsigned int	buflen;
+	int		can_stall;
+
+	int		transport_type;
+	char		*transport_name;
+	int		protocol_type;
+	char		*protocol_name;
+
+} mod_data = {					// Default values
+	.transport_parm		= "BBB",
+	.protocol_parm		= "SCSI",
+	.removable		= 0,
+	.vendor			= DRIVER_VENDOR_ID,
+	.product		= DRIVER_PRODUCT_ID,
+	.release		= 0xffff,	// Use controller chip type
+	.buflen			= 16384,
+	.can_stall		= 1,
+	};
+
+
+module_param_array(file, charp, &mod_data.num_filenames, S_IRUGO);
+MODULE_PARM_DESC(file, "names of backing files or devices");
+
+module_param_array(ro, bool, &mod_data.num_ros, S_IRUGO);
+MODULE_PARM_DESC(ro, "true to force read-only");
+
+module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
+MODULE_PARM_DESC(luns, "number of LUNs");
+
+module_param_named(removable, mod_data.removable, bool, S_IRUGO);
+MODULE_PARM_DESC(removable, "true to simulate removable media");
+
+
+/* In the non-TEST version, only the module parameters listed above
+ * are available. */
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+
+module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
+MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
+
+module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
+MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
+		"8070, or SCSI)");
+
+module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
+MODULE_PARM_DESC(vendor, "USB Vendor ID");
+
+module_param_named(product, mod_data.product, ushort, S_IRUGO);
+MODULE_PARM_DESC(product, "USB Product ID");
+
+module_param_named(release, mod_data.release, ushort, S_IRUGO);
+MODULE_PARM_DESC(release, "USB release number");
+
+module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
+MODULE_PARM_DESC(buflen, "I/O buffer size");
+
+module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
+MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
+
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* USB protocol value = the transport method */
+#define USB_PR_CBI	0x00		// Control/Bulk/Interrupt
+#define USB_PR_CB	0x01		// Control/Bulk w/o interrupt
+#define USB_PR_BULK	0x50		// Bulk-only
+
+/* USB subclass value = the protocol encapsulation */
+#define USB_SC_RBC	0x01		// Reduced Block Commands (flash)
+#define USB_SC_8020	0x02		// SFF-8020i, MMC-2, ATAPI (CD-ROM)
+#define USB_SC_QIC	0x03		// QIC-157 (tape)
+#define USB_SC_UFI	0x04		// UFI (floppy)
+#define USB_SC_8070	0x05		// SFF-8070i (removable)
+#define USB_SC_SCSI	0x06		// Transparent SCSI
+
+/* Bulk-only data structures */
+
+/* Command Block Wrapper */
+struct bulk_cb_wrap {
+	__le32	Signature;		// Contains 'USBC'
+	u32	Tag;			// Unique per command id
+	__le32	DataTransferLength;	// Size of the data
+	u8	Flags;			// Direction in bit 7
+	u8	Lun;			// LUN (normally 0)
+	u8	Length;			// Of the CDB, <= MAX_COMMAND_SIZE
+	u8	CDB[16];		// Command Data Block
+};
+
+#define USB_BULK_CB_WRAP_LEN	31
+#define USB_BULK_CB_SIG		0x43425355	// Spells out USBC
+#define USB_BULK_IN_FLAG	0x80
+
+/* Command Status Wrapper */
+struct bulk_cs_wrap {
+	__le32	Signature;		// Should = 'USBS'
+	u32	Tag;			// Same as original command
+	__le32	Residue;		// Amount not transferred
+	u8	Status;			// See below
+};
+
+#define USB_BULK_CS_WRAP_LEN	13
+#define USB_BULK_CS_SIG		0x53425355	// Spells out 'USBS'
+#define USB_STATUS_PASS		0
+#define USB_STATUS_FAIL		1
+#define USB_STATUS_PHASE_ERROR	2
+
+/* Bulk-only class specific requests */
+#define USB_BULK_RESET_REQUEST		0xff
+#define USB_BULK_GET_MAX_LUN_REQUEST	0xfe
+
+
+/* CBI Interrupt data structure */
+struct interrupt_data {
+	u8	bType;
+	u8	bValue;
+};
+
+#define CBI_INTERRUPT_DATA_LEN		2
+
+/* CBI Accept Device-Specific Command request */
+#define USB_CBI_ADSC_REQUEST		0x00
+
+
+#define MAX_COMMAND_SIZE	16	// Length of a SCSI Command Data Block
+
+/* SCSI commands that we recognize */
+#define SC_FORMAT_UNIT			0x04
+#define SC_INQUIRY			0x12
+#define SC_MODE_SELECT_6		0x15
+#define SC_MODE_SELECT_10		0x55
+#define SC_MODE_SENSE_6			0x1a
+#define SC_MODE_SENSE_10		0x5a
+#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL	0x1e
+#define SC_READ_6			0x08
+#define SC_READ_10			0x28
+#define SC_READ_12			0xa8
+#define SC_READ_CAPACITY		0x25
+#define SC_READ_FORMAT_CAPACITIES	0x23
+#define SC_RELEASE			0x17
+#define SC_REQUEST_SENSE		0x03
+#define SC_RESERVE			0x16
+#define SC_SEND_DIAGNOSTIC		0x1d
+#define SC_START_STOP_UNIT		0x1b
+#define SC_SYNCHRONIZE_CACHE		0x35
+#define SC_TEST_UNIT_READY		0x00
+#define SC_VERIFY			0x2f
+#define SC_WRITE_6			0x0a
+#define SC_WRITE_10			0x2a
+#define SC_WRITE_12			0xaa
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	// Sense Key byte, etc.
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
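+
+/* For example, SS_INVALID_FIELD_IN_CDB (0x052400) unpacks to
+ * SK = 0x05 (Illegal Request), ASC = 0x24, ASCQ = 0x00. */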
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * These definitions will permit the compiler to avoid generating code for
+ * parts of the driver that aren't used in the non-TEST version.  Even gcc
+ * can recognize when a test of a constant expression yields a dead code
+ * path.
+ */
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+
+#define transport_is_bbb()	(mod_data.transport_type == USB_PR_BULK)
+#define transport_is_cbi()	(mod_data.transport_type == USB_PR_CBI)
+#define protocol_is_scsi()	(mod_data.protocol_type == USB_SC_SCSI)
+
+#else
+
+#define transport_is_bbb()	1
+#define transport_is_cbi()	0
+#define protocol_is_scsi()	1
+
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+struct lun {
+	struct file	*filp;
+	loff_t		file_length;
+	loff_t		num_sectors;
+
+	unsigned int	ro : 1;
+	unsigned int	prevent_medium_removal : 1;
+	unsigned int	registered : 1;
+
+	u32		sense_data;
+	u32		sense_data_info;
+	u32		unit_attention_data;
+
+	struct device	dev;
+};
+
+#define backing_file_is_open(curlun)	((curlun)->filp != NULL)
+
+static inline struct lun *dev_to_lun(struct device *dev)
+{
+	return container_of(dev, struct lun, dev);
+}
+
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	// An impossibly large value
+
+/* Number of buffers we will use.  2 is enough for double-buffering */
+#define NUM_BUFFERS	2
+
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
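+
+/* Roughly: a bulk-in buffer cycles EMPTY -> FULL (filled by the driver's
+ * thread) -> BUSY (queued on the endpoint) -> EMPTY on completion, while a
+ * bulk-out buffer cycles EMPTY -> BUSY -> FULL -> EMPTY once it is drained. */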
+
+struct fsg_buffhd {
+	void				*buf;
+	dma_addr_t			dma;
+	volatile enum fsg_buffer_state	state;
+	struct fsg_buffhd		*next;
+
+	/* The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here. */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	volatile int			inreq_busy;
+	struct usb_request		*outreq;
+	volatile int			outreq_busy;
+};
+
+enum fsg_state {
+	FSG_STATE_COMMAND_PHASE = -10,		// This one isn't used anywhere
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_INTERFACE_CHANGE,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_DISCONNECT,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+
+struct fsg_dev {
+	/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
+	spinlock_t		lock;
+	struct usb_gadget	*gadget;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	struct usb_ep		*ep0;		// Handy copy of gadget->ep0
+	struct usb_request	*ep0req;	// For control responses
+	volatile unsigned int	ep0_req_tag;
+	const char		*ep0req_name;
+
+	struct usb_request	*intreq;	// For interrupt responses
+	volatile int		intreq_busy;
+	struct fsg_buffhd	*intr_buffhd;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		// For exception handling
+	unsigned int		exception_req_tag;
+
+	u8			config, new_config;
+
+	unsigned int		running : 1;
+	unsigned int		bulk_in_enabled : 1;
+	unsigned int		bulk_out_enabled : 1;
+	unsigned int		intr_in_enabled : 1;
+	unsigned int		phase_error : 1;
+	unsigned int		short_packet_received : 1;
+	unsigned int		bad_lun_okay : 1;
+
+	unsigned long		atomic_bitflags;
+#define REGISTERED		0
+#define CLEAR_BULK_HALTS	1
+#define SUSPENDED		2
+
+	struct usb_ep		*bulk_in;
+	struct usb_ep		*bulk_out;
+	struct usb_ep		*intr_in;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	buffhds[NUM_BUFFERS];
+
+	wait_queue_head_t	thread_wqh;
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	int			thread_pid;
+	struct task_struct	*thread_task;
+	sigset_t		thread_signal_mask;
+
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	unsigned int		lun;
+	u32			residue;
+	u32			usb_amount_left;
+
+	/* The CB protocol offers no way for a host to know when a command
+	 * has completed.  As a result the next command may arrive early,
+	 * and we will still have to handle it.  For that reason we need
+	 * a buffer to store new commands when using CB (or CBI, which
+	 * does not oblige a host to wait for command completion either). */
+	int			cbbuf_cmnd_size;
+	u8			cbbuf_cmnd[MAX_COMMAND_SIZE];
+
+	unsigned int		nluns;
+	struct lun		*luns;
+	struct lun		*curlun;
+	struct completion	lun_released;
+};
+
+typedef void (*fsg_routine_t)(struct fsg_dev *);
+
+static int inline exception_in_progress(struct fsg_dev *fsg)
+{
+	return (fsg->state > FSG_STATE_IDLE);
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+static void inline set_bulk_out_req_length(struct fsg_dev *fsg,
+		struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % fsg->bulk_out_maxpacket;
+	if (rem > 0)
+		length += fsg->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
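+
+/* For example, with a 512-byte bulk-out maxpacket a 700-byte request is
+ * padded to 1024 bytes while bulk_out_intended_length stays at 700. */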
+
+static struct fsg_dev			*the_fsg;
+static struct usb_gadget_driver		fsg_driver;
+
+static void	close_backing_file(struct lun *curlun);
+static void	close_all_backing_files(struct fsg_dev *fsg);
+
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef DUMP_MSGS
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{
+	unsigned int	start, num, i;
+	char		line[52], *p;
+
+	if (length >= 512)
+		return;
+	DBG(fsg, "%s, length %u:\n", label, length);
+
+	start = 0;
+	while (length > 0) {
+		num = min(length, 16u);
+		p = line;
+		for (i = 0; i < num; ++i) {
+			if (i == 8)
+				*p++ = ' ';
+			sprintf(p, " %02x", buf[i]);
+			p += 3;
+		}
+		*p = 0;
+		printk(KERN_DEBUG "%6x: %s\n", start, line);
+		buf += num;
+		start += num;
+		length -= num;
+	}
+}
+
+static void inline dump_cdb(struct fsg_dev *fsg)
+{}
+
+#else
+
+static void inline dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{}
+
+static void inline dump_cdb(struct fsg_dev *fsg)
+{
+	int	i;
+	char	cmdbuf[3*MAX_COMMAND_SIZE + 1];
+
+	for (i = 0; i < fsg->cmnd_size; ++i)
+		sprintf(cmdbuf + i*3, " %02x", fsg->cmnd[i]);
+	VDBG(fsg, "SCSI CDB: %s\n", cmdbuf);
+}
+
+#endif /* DUMP_MSGS */
+
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+{
+	const char	*name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		name = ep->name;
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ep_set_halt(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Routines for unaligned data access */
+
+static u16 inline get_be16(u8 *buf)
+{
+	return ((u16) buf[0] << 8) | ((u16) buf[1]);
+}
+
+static u32 inline get_be32(u8 *buf)
+{
+	return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
+			((u32) buf[2] << 8) | ((u32) buf[3]);
+}
+
+static void inline put_be16(u8 *buf, u16 val)
+{
+	buf[0] = val >> 8;
+	buf[1] = val;
+}
+
+static void inline put_be32(u8 *buf, u32 val)
+{
+	buf[0] = val >> 24;
+	buf[1] = val >> 16;
+	buf[2] = val >> 8;
+	buf[3] = val;
+}
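+
+/* For example, get_be32() applied to the bytes 00 01 02 03 yields
+ * 0x00010203; put_be32() stores values in the same big-endian order
+ * used by SCSI CDB fields. */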
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * DESCRIPTORS ... most are static, but strings and (full) configuration
+ * descriptors are built on demand.  Also the (static) config and interface
+ * descriptors are adjusted during fsg_bind().
+ */
+#define STRING_MANUFACTURER	1
+#define STRING_PRODUCT		2
+#define STRING_SERIAL		3
+#define STRING_CONFIG		4
+#define STRING_INTERFACE	5
+
+/* There is only one configuration. */
+#define	CONFIG_VALUE		1
+
+static struct usb_device_descriptor
+device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		__constant_cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+
+	/* The next three values can be overridden by module parameters */
+	.idVendor =		__constant_cpu_to_le16(DRIVER_VENDOR_ID),
+	.idProduct =		__constant_cpu_to_le16(DRIVER_PRODUCT_ID),
+	.bcdDevice =		__constant_cpu_to_le16(0xffff),
+
+	.iManufacturer =	STRING_MANUFACTURER,
+	.iProduct =		STRING_PRODUCT,
+	.iSerialNumber =	STRING_SERIAL,
+	.bNumConfigurations =	1,
+};
+
+static struct usb_config_descriptor
+config_desc = {
+	.bLength =		sizeof config_desc,
+	.bDescriptorType =	USB_DT_CONFIG,
+
+	/* wTotalLength computed by usb_gadget_config_buf() */
+	.bNumInterfaces =	1,
+	.bConfigurationValue =	CONFIG_VALUE,
+	.iConfiguration =	STRING_CONFIG,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		1,	// self-powered
+};
+
+static struct usb_otg_descriptor
+otg_desc = {
+	.bLength =		sizeof(otg_desc),
+	.bDescriptorType =	USB_DT_OTG,
+
+	.bmAttributes =		USB_OTG_SRP,
+};
+
+/* There is only one interface. */
+
+static struct usb_interface_descriptor
+intf_desc = {
+	.bLength =		sizeof intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,		// Adjusted during fsg_bind()
+	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =	USB_SC_SCSI,	// Adjusted during fsg_bind()
+	.bInterfaceProtocol =	USB_PR_BULK,	// Adjusted during fsg_bind()
+	.iInterface =		STRING_INTERFACE,
+};
+
+/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
+ * and interrupt-in. */
+
+static struct usb_endpoint_descriptor
+fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+static struct usb_endpoint_descriptor
+fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+static struct usb_endpoint_descriptor
+fs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(2),
+	.bInterval =		32,	// frames -> 32 ms
+};
+
+static const struct usb_descriptor_header *fs_function[] = {
+	(struct usb_descriptor_header *) &otg_desc,
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	(struct usb_descriptor_header *) &fs_intr_in_desc,
+	NULL,
+};
+#define FS_FUNCTION_PRE_EP_ENTRIES	2
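+
+/* The two "pre-endpoint" entries are the OTG and interface descriptors;
+ * populate_config_buf() skips the OTG descriptor for non-OTG gadgets by
+ * starting one entry later in this array. */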
+
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+
+/*
+ * USB 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ *
+ * That means alternate endpoint descriptors (bigger packets)
+ * and a "device qualifier" ... plus more construction options
+ * for the config descriptor.
+ */
+static struct usb_qualifier_descriptor
+dev_qualifier = {
+	.bLength =		sizeof dev_qualifier,
+	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
+
+	.bcdUSB =		__constant_cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+
+	.bNumConfigurations =	1,
+};
+
+static struct usb_endpoint_descriptor
+hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		1,	// NAK every 1 uframe
+};
+
+static struct usb_endpoint_descriptor
+hs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(2),
+	.bInterval =		9,	// 2**(9-1) = 256 uframes -> 32 ms
+};
+
+static const struct usb_descriptor_header *hs_function[] = {
+	(struct usb_descriptor_header *) &otg_desc,
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	(struct usb_descriptor_header *) &hs_intr_in_desc,
+	NULL,
+};
+#define HS_FUNCTION_PRE_EP_ENTRIES	2
+
+/* Maxpacket and other transfer characteristics vary by speed. */
+#define ep_desc(g,fs,hs)	(((g)->speed==USB_SPEED_HIGH) ? (hs) : (fs))
+
+#else
+
+/* If there's no high speed support, always use the full-speed descriptor. */
+#define ep_desc(g,fs,hs)	fs
+
+#endif	/* !CONFIG_USB_GADGET_DUALSPEED */
+
+
+/* The CBI specification limits the serial string to 12 uppercase hexadecimal
+ * characters. */
+static char				manufacturer[64];
+static char				serial[13];
+
+/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
+static struct usb_string		strings[] = {
+	{STRING_MANUFACTURER,	manufacturer},
+	{STRING_PRODUCT,	longname},
+	{STRING_SERIAL,		serial},
+	{STRING_CONFIG,		"Self-powered"},
+	{STRING_INTERFACE,	"Mass Storage"},
+	{}
+};
+
+static struct usb_gadget_strings	stringtab = {
+	.language	= 0x0409,		// en-us
+	.strings	= strings,
+};
+
+
+/*
+ * Config descriptors must agree with the code that sets configurations
+ * and with code managing interfaces and their altsettings.  They must
+ * also handle different speeds and other-speed requests.
+ */
+static int populate_config_buf(struct usb_gadget *gadget,
+		u8 *buf, u8 type, unsigned index)
+{
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	enum usb_device_speed			speed = gadget->speed;
+#endif
+	int					len;
+	const struct usb_descriptor_header	**function;
+
+	if (index > 0)
+		return -EINVAL;
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
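+	/* For an other-speed request, build descriptors for the speed the
+	 * device is not currently running at; the sum-minus-speed expression
+	 * simply swaps USB_SPEED_FULL and USB_SPEED_HIGH. */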
+	if (type == USB_DT_OTHER_SPEED_CONFIG)
+		speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
+	if (speed == USB_SPEED_HIGH)
+		function = hs_function;
+	else
+#endif
+		function = fs_function;
+
+	/* for now, don't advertise srp-only devices */
+	if (!gadget->is_otg)
+		function++;
+
+	len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
+	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
+	return len;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+static void wakeup_thread(struct fsg_dev *fsg)
+{
+	/* Tell the main thread that something has happened */
+	fsg->thread_wakeup_needed = 1;
+	wake_up_all(&fsg->thread_wqh);
+}
+
+
+static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
+{
+	unsigned long		flags;
+	struct task_struct	*thread_task;
+
+	/* Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal. */
+	spin_lock_irqsave(&fsg->lock, flags);
+	if (fsg->state <= new_state) {
+		fsg->exception_req_tag = fsg->ep0_req_tag;
+		fsg->state = new_state;
+		thread_task = fsg->thread_task;
+		if (thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED, thread_task);
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* The disconnect callback and ep0 routines.  These always run in_irq,
+ * except that ep0_queue() is called in the main thread to acknowledge
+ * completion of various requests: set config, set interface, and
+ * Bulk-only device reset. */
+
+static void fsg_disconnect(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+
+	DBG(fsg, "disconnect or port reset\n");
+	raise_exception(fsg, FSG_STATE_DISCONNECT);
+}
+
+
+static int ep0_queue(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
+	if (rc != 0 && rc != -ESHUTDOWN) {
+
+		/* We can't do much more than wait for a reset */
+		WARN(fsg, "error in submission: %s --> %d\n",
+				fsg->ep0->name, rc);
+	}
+	return rc;
+}
+
+static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = (struct fsg_dev *) ep->driver_data;
+
+	if (req->actual > 0)
+		dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	if (req->status == 0 && req->context)
+		((fsg_routine_t) (req->context))(fsg);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
+static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = (struct fsg_dev *) ep->driver_data;
+	struct fsg_buffhd	*bh = (struct fsg_buffhd *) req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	spin_lock(&fsg->lock);
+	bh->inreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	spin_unlock(&fsg->lock);
+	wakeup_thread(fsg);
+}
+
+static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = (struct fsg_dev *) ep->driver_data;
+	struct fsg_buffhd	*bh = (struct fsg_buffhd *) req->context;
+
+	dump_msg(fsg, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
+				req->status, req->actual,
+				bh->bulk_out_intended_length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	spin_lock(&fsg->lock);
+	bh->outreq_busy = 0;
+	bh->state = BUF_STATE_FULL;
+	spin_unlock(&fsg->lock);
+	wakeup_thread(fsg);
+}
+
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = (struct fsg_dev *) ep->driver_data;
+	struct fsg_buffhd	*bh = (struct fsg_buffhd *) req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	spin_lock(&fsg->lock);
+	fsg->intreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	spin_unlock(&fsg->lock);
+	wakeup_thread(fsg);
+}
+
+#else
+static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
+{}
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Ep0 class-specific handlers.  These always run in_irq. */
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = fsg->ep0req;
+	static u8		cbi_reset_cmnd[6] = {
+			SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
+
+	/* Error in command transfer? */
+	if (req->status || req->length != req->actual ||
+			req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
+
+		/* Not all controllers allow a protocol stall after
+		 * receiving control-out data, but we'll try anyway. */
+		fsg_set_halt(fsg, fsg->ep0);
+		return;			// Wait for reset
+	}
+
+	/* Is it the special reset command? */
+	if (req->actual >= sizeof cbi_reset_cmnd &&
+			memcmp(req->buf, cbi_reset_cmnd,
+				sizeof cbi_reset_cmnd) == 0) {
+
+		/* Raise an exception to stop the current operation
+		 * and reinitialize our state. */
+		DBG(fsg, "cbi reset request\n");
+		raise_exception(fsg, FSG_STATE_RESET);
+		return;
+	}
+
+	VDBG(fsg, "CB[I] accept device-specific command\n");
+	spin_lock(&fsg->lock);
+
+	/* Save the command for later */
+	if (fsg->cbbuf_cmnd_size)
+		WARN(fsg, "CB[I] overwriting previous command\n");
+	fsg->cbbuf_cmnd_size = req->actual;
+	memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
+
+	spin_unlock(&fsg->lock);
+	wakeup_thread(fsg);
+}
+
+#else
+static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{}
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+static int class_setup_req(struct fsg_dev *fsg,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_request	*req = fsg->ep0req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = ctrl->wIndex;
+	u16			w_length = ctrl->wLength;
+
+	if (!fsg->config)
+		return value;
+
+	/* Handle Bulk-only class-specific requests */
+	if (transport_is_bbb()) {
+		switch (ctrl->bRequest) {
+
+		case USB_BULK_RESET_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_OUT |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_index != 0) {
+				value = -EDOM;
+				break;
+			}
+
+			/* Raise an exception to stop the current operation
+			 * and reinitialize our state. */
+			DBG(fsg, "bulk reset request\n");
+			raise_exception(fsg, FSG_STATE_RESET);
+			value = DELAYED_STATUS;
+			break;
+
+		case USB_BULK_GET_MAX_LUN_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_IN |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_index != 0) {
+				value = -EDOM;
+				break;
+			}
+			VDBG(fsg, "get max LUN\n");
+			*(u8 *) req->buf = fsg->nluns - 1;
+			value = min(w_length, (u16) 1);
+			break;
+		}
+	}
+
+	/* Handle CBI class-specific requests */
+	else {
+		switch (ctrl->bRequest) {
+
+		case USB_CBI_ADSC_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_OUT |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_index != 0) {
+				value = -EDOM;
+				break;
+			}
+			if (w_length > MAX_COMMAND_SIZE) {
+				value = -EOVERFLOW;
+				break;
+			}
+			value = w_length;
+			fsg->ep0req->context = received_cbi_adsc;
+			break;
+		}
+	}
+
+	if (value == -EOPNOTSUPP)
+		VDBG(fsg,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			ctrl->wValue, w_index, w_length);
+	return value;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Ep0 standard request handlers.  These always run in_irq. */
+
+static int standard_setup_req(struct fsg_dev *fsg,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_request	*req = fsg->ep0req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = ctrl->wIndex;
+	u16			w_value = ctrl->wValue;
+	u16			w_length = ctrl->wLength;
+
+	/* Usually this just stores reply data in the pre-allocated ep0 buffer,
+	 * but config change events will also reconfigure hardware. */
+	switch (ctrl->bRequest) {
+
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
+				USB_RECIP_DEVICE))
+			break;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			VDBG(fsg, "get device descriptor\n");
+			value = min(w_length, (u16) sizeof device_desc);
+			memcpy(req->buf, &device_desc, value);
+			break;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+		case USB_DT_DEVICE_QUALIFIER:
+			VDBG(fsg, "get device qualifier\n");
+			if (!fsg->gadget->is_dualspeed)
+				break;
+			value = min(w_length, (u16) sizeof dev_qualifier);
+			memcpy(req->buf, &dev_qualifier, value);
+			break;
+
+		case USB_DT_OTHER_SPEED_CONFIG:
+			VDBG(fsg, "get other-speed config descriptor\n");
+			if (!fsg->gadget->is_dualspeed)
+				break;
+			goto get_config;
+#endif
+		case USB_DT_CONFIG:
+			VDBG(fsg, "get configuration descriptor\n");
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+		get_config:
+#endif
+			value = populate_config_buf(fsg->gadget,
+					req->buf,
+					w_value >> 8,
+					w_value & 0xff);
+			if (value >= 0)
+				value = min(w_length, (u16) value);
+			break;
+
+		case USB_DT_STRING:
+			VDBG(fsg, "get string descriptor\n");
+
+			/* wIndex == language code */
+			value = usb_gadget_get_string(&stringtab,
+					w_value & 0xff, req->buf);
+			if (value >= 0)
+				value = min(w_length, (u16) value);
+			break;
+		}
+		break;
+
+	/* One config, two speeds */
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
+				USB_RECIP_DEVICE))
+			break;
+		VDBG(fsg, "set configuration\n");
+		if (w_value == CONFIG_VALUE || w_value == 0) {
+			fsg->new_config = w_value;
+
+			/* Raise an exception to wipe out previous transaction
+			 * state (queued bufs, etc) and set the new config. */
+			raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
+			value = DELAYED_STATUS;
+		}
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
+				USB_RECIP_DEVICE))
+			break;
+		VDBG(fsg, "get configuration\n");
+		*(u8 *) req->buf = fsg->config;
+		value = min(w_length, (u16) 1);
+		break;
+
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD |
+				USB_RECIP_INTERFACE))
+			break;
+		if (fsg->config && w_index == 0) {
+
+			/* Raise an exception to wipe out previous transaction
+			 * state (queued bufs, etc) and install the new
+			 * interface altsetting. */
+			raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
+			value = DELAYED_STATUS;
+		}
+		break;
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
+				USB_RECIP_INTERFACE))
+			break;
+		if (!fsg->config)
+			break;
+		if (w_index != 0) {
+			value = -EDOM;
+			break;
+		}
+		VDBG(fsg, "get interface\n");
+		*(u8 *) req->buf = 0;
+		value = min(w_length, (u16) 1);
+		break;
+
+	default:
+		VDBG(fsg,
+			"unknown control req %02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	return value;
+}
+
+
+static int fsg_setup(struct usb_gadget *gadget,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+	int			rc;
+
+	++fsg->ep0_req_tag;		// Record arrival of a new request
+	fsg->ep0req->context = NULL;
+	fsg->ep0req->length = 0;
+	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
+		rc = class_setup_req(fsg, ctrl);
+	else
+		rc = standard_setup_req(fsg, ctrl);
+
+	/* Respond with data/status or defer until later? */
+	if (rc >= 0 && rc != DELAYED_STATUS) {
+		fsg->ep0req->length = rc;
+		fsg->ep0req->zero = (rc < ctrl->wLength &&
+				(rc % gadget->ep0->maxpacket) == 0);
+		fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
+				"ep0-in" : "ep0-out");
+		rc = ep0_queue(fsg);
+	}
+
+	/* Device either stalls (rc < 0) or reports success */
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+		struct usb_request *req, volatile int *pbusy,
+		volatile enum fsg_buffer_state *state)
+{
+	int	rc;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+	else if (ep == fsg->intr_in)
+		dump_msg(fsg, "intr-in", req->buf, req->length);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	rc = usb_ep_queue(ep, req, GFP_KERNEL);
+	if (rc != 0) {
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/* Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled. */
+		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
+						req->length == 0))
+			WARN(fsg, "error in submission: %s --> %d\n",
+					ep->name, rc);
+	}
+}
+
+
+static int sleep_thread(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	/* Wait until a signal arrives or we are woken up */
+	rc = wait_event_interruptible(fsg->thread_wqh,
+			fsg->thread_wakeup_needed);
+	fsg->thread_wakeup_needed = 0;
+	if (current->flags & PF_FREEZE)
+		refrigerator(PF_FREEZE);
+	return (rc ? -EINTR : 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == SC_READ_6)
+		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+	else {
+		lba = get_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Carry out the file reads */
+	amount_left = fsg->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		// No default reply
+
+	for (;;) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 * Finally, if we're not at a page boundary, don't read past
+		 *	the next page.
+		 * If this means reading 0 then we were asked to read past
+		 *	the end of file. */
+		amount = min((unsigned int) amount_left, mod_data.buflen);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
+		if (partial_page > 0)
+			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
+					partial_page);
+
+		/* Wait for the next buffer to become available */
+		bh = fsg->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			if ((rc = sleep_thread(fsg)) != 0)
+				return rc;
+		}
+
+		/* If we were asked to read past the end of file,
+		 * end with an empty buffer. */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	// Round down to a block
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		fsg->residue -= nread;
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		// No more left to read
+
+		/* Send this buffer and go read some more */
+		bh->inreq->zero = 0;
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+		fsg->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		// No default reply
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nwritten;
+	int			rc;
+
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	curlun->filp->f_flags &= ~O_SYNC;	// Default is not to wait
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == SC_WRITE_6)
+		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+	else {
+		lba = get_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		if (fsg->cmnd[1] & 0x08)	// FUA
+			curlun->filp->f_flags |= O_SYNC;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << 9;
+	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
+
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/* Figure out how much we want to get:
+			 * Try to get the remaining amount.
+			 * But don't get more than the buffer size.
+			 * And don't try to go past the end of the file.
+			 * If we're not at a page boundary,
+			 *	don't go past the next page.
+			 * If this means getting 0, then we were asked
+			 *	to write past the end of file.
+			 * Finally, round down to a block boundary. */
+			amount = min(amount_left_to_req, mod_data.buflen);
+			amount = min((loff_t) amount, curlun->file_length -
+					usb_offset);
+			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
+			if (partial_page > 0)
+				amount = min(amount,
+	(unsigned int) PAGE_CACHE_SIZE - partial_page);
+
+			if (amount == 0) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info = usb_offset >> 9;
+				continue;
+			}
+			amount -= (amount & 511);
+			if (amount == 0) {
+
+				/* Why were we asked to transfer a
+				 * partial block? */
+				get_some_more = 0;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			fsg->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = bh->bulk_out_intended_length =
+					amount;
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = fsg->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			// We stopped early
+		if (bh->state == BUF_STATE_FULL) {
+			fsg->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info = file_offset >> 9;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+	"write %u @ %llu beyond end %llu\n",
+	amount, (unsigned long long) file_offset,
+	(unsigned long long) curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					(char __user *) bh->buf,
+					amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+					(unsigned long long) file_offset,
+					(int) nwritten);
+			if (signal_pending(current))
+				return -EINTR;		// Interrupted!
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+						(int) nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+						(int) nwritten, amount);
+				nwritten -= (nwritten & 511);
+						// Round down to a block
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			fsg->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info = file_offset >> 9;
+				break;
+			}
+
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual != bh->outreq->length) {
+				fsg->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		if ((rc = sleep_thread(fsg)) != 0)
+			return rc;
+	}
+
+	return -EIO;		// No default reply
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Sync the file data, don't bother with the metadata.
+ * This code was copied from fs/buffer.c:sys_fdatasync(). */
+static int fsync_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode;
+	int		rc, err;
+
+	if (curlun->ro || !filp)
+		return 0;
+	if (!filp->f_op->fsync)
+		return -EINVAL;
+
+	inode = filp->f_dentry->d_inode;
+	down(&inode->i_sem);
+	current->flags |= PF_SYNCWRITE;
+	rc = filemap_fdatawrite(inode->i_mapping);
+	err = filp->f_op->fsync(filp, filp->f_dentry, 1);
+	if (!rc)
+		rc = err;
+	err = filemap_fdatawait(inode->i_mapping);
+	if (!rc)
+		rc = err;
+	current->flags &= ~PF_SYNCWRITE;
+	up(&inode->i_sem);
+	VLDBG(curlun, "fdatasync -> %d\n", rc);
+	return rc;
+}
+
+static void fsync_all(struct fsg_dev *fsg)
+{
+	int	i;
+
+	for (i = 0; i < fsg->nluns; ++i)
+		fsync_sub(&fsg->luns[i]);
+}
+
+static int do_synchronize_cache(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all of the file's
+	 * dirty data buffers. */
+	rc = fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_inode_pages(inode->i_mapping);
+	VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	lba = get_be32(&fsg->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it. */
+	if ((fsg->cmnd[1] & ~0x10) != 0) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_be16(&fsg->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		// No default reply
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << 9;
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 * If this means reading 0 then we were asked to read
+		 * past the end of file. */
+		amount = min((unsigned int) amount_left, mod_data.buflen);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	// Round down to a sector
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	u8	*buf = (u8 *) bh->buf;
+
+	static char vendor_id[] = "Linux   ";
+	static char product_id[] = "File-Stor Gadget";
+
+	if (!fsg->curlun) {		// Unsupported LUNs are okay
+		fsg->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		// Unsupported, no device-type
+		return 36;
+	}
+
+	memset(buf, 0, 8);	// Non-removable, direct-access device
+	if (mod_data.removable)
+		buf[1] = 0x80;
+	buf[2] = 2;		// ANSI SCSI level 2
+	buf[3] = 2;		// SCSI-2 INQUIRY data format
+	buf[4] = 31;		// Additional length
+				// No special options
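+
+	/* Bytes 8-35: 8-byte vendor ID, 16-byte product ID, and a 4-digit
+	 * hex release number, completing the 36-byte response. */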
+	sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, product_id,
+			mod_data.release);
+	return 36;
+}
+
+
+static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		// Unsupported LUNs are okay
+		fsg->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = 0x80 | 0x70;			// Valid, current error
+	buf[2] = SK(sd);
+	put_be32(&buf[3], sdinfo);		// Sense information
+	buf[7] = 18 - 8;			// Additional sense length
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
+
+static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u32		lba = get_be32(&fsg->cmnd[2]);
+	int		pmi = fsg->cmnd[8];
+	u8		*buf = (u8 *) bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
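+	/* Report the highest valid LBA (one less than the sector count) and
+	 * the 512-byte block size this driver assumes throughout (hence the
+	 * << 9 shifts used for file offsets). */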
+	put_be32(&buf[0], curlun->num_sectors - 1);	// Max logical block
+	put_be32(&buf[4], 512);				// Block length
+	return 8;
+}
+
+
+static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		mscmnd = fsg->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((fsg->cmnd[1] & ~0x08) != 0) {		// Mask away DBD
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = fsg->cmnd[2] >> 6;
+	page_code = fsg->cmnd[2] & 0x3f;
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
+
+	/* Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later. */
+	memset(buf, 0, 8);
+	if (mscmnd == SC_MODE_SENSE_6) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		// WP, DPOFUA
+		buf += 4;
+		limit = 255;
+	} else {			// SC_MODE_SENSE_10
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		// WP, DPOFUA
+		buf += 8;
+		limit = 65535;		// Should really be mod_data.buflen
+	}
+
+	/* No block descriptors */
+
+	/* The mode pages, in numerical order.  The only page we support
+	 * is the Caching page. */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		// Page code
+		buf[1] = 10;		// Page length
+		memset(buf+2, 0, 10);	// None of the fields are changeable
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	// Write cache enable,
+					// Read cache not disabled
+					// No cache retention priorities
+			put_be16(&buf[4], 0xffff);  // Don't disable prefetch
+					// Minimum prefetch = 0
+			put_be16(&buf[8], 0xffff);  // Maximum prefetch
+			put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling
+		}
+		buf += 12;
+	}
+
+	/* Check that a valid page was requested and the mode data length
+	 * isn't too long. */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/*  Store the mode data length */
+	if (mscmnd == SC_MODE_SENSE_6)
+		buf0[0] = len - 1;
+	else
+		put_be16(buf0, len - 2);
+	return len;
+}
+
+
+static int do_start_stop(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		loej, start;
+
+	if (!mod_data.removable) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	// int immed = fsg->cmnd[1] & 0x01;
+	loej = fsg->cmnd[4] & 0x02;
+	start = fsg->cmnd[4] & 0x01;
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+	if ((fsg->cmnd[1] & ~0x01) != 0 ||		// Mask away Immed
+			(fsg->cmnd[4] & ~0x03) != 0) {	// Mask LoEj, Start
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (!start) {
+
+		/* Are we allowed to unload the media? */
+		if (curlun->prevent_medium_removal) {
+			LDBG(curlun, "unload attempt prevented\n");
+			curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+			return -EINVAL;
+		}
+		if (loej) {		// Simulate an unload/eject
+			up_read(&fsg->filesem);
+			down_write(&fsg->filesem);
+			close_backing_file(curlun);
+			up_write(&fsg->filesem);
+			down_read(&fsg->filesem);
+		}
+	} else {
+
+		/* Our emulation doesn't support mounting; the medium is
+		 * available for use as soon as it is loaded. */
+		if (!backing_file_is_open(curlun)) {
+			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+			return -EINVAL;
+		}
+	}
+#endif
+	return 0;
+}
+
+
+static int do_prevent_allow(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		prevent;
+
+	if (!mod_data.removable) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	prevent = fsg->cmnd[4] & 0x01;
+	if ((fsg->cmnd[4] & ~0x01) != 0) {		// Mask away Prevent
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+
+static int do_read_format_capacities(struct fsg_dev *fsg,
+			struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;		// Only the Current/Maximum Capacity Descriptor
+	buf += 4;
+
+	put_be32(&buf[0], curlun->num_sectors);		// Number of blocks
+	put_be32(&buf[4], 512);				// Block length
+	buf[4] = 0x02;					// Current capacity
+	return 12;
+}
+
+
+static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+
+	/* We don't support MODE SELECT */
+	curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARN(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int pad_with_zeros(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	u32			nkeep = bh->inreq->length;
+	u32			nsend;
+	int			rc;
+
+	bh->state = BUF_STATE_EMPTY;		// For the first iteration
+	fsg->usb_amount_left = nkeep + fsg->residue;
+	while (fsg->usb_amount_left > 0) {
+
+		/* Wait for the next buffer to be free */
+		while (bh->state != BUF_STATE_EMPTY) {
+			if ((rc = sleep_thread(fsg)) != 0)
+				return rc;
+		}
+
+		nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen);
+		memset(bh->buf + nkeep, 0, nsend - nkeep);
+		bh->inreq->length = nsend;
+		bh->inreq->zero = 0;
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+		bh = fsg->next_buffhd_to_fill = bh->next;
+		fsg->usb_amount_left -= nsend;
+		nkeep = 0;
+	}
+	return 0;
+}
+
+static int throw_away_data(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
+			fsg->usb_amount_left > 0) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			bh->state = BUF_STATE_EMPTY;
+			fsg->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual != bh->outreq->length ||
+					bh->outreq->status != 0) {
+				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
+			amount = min(fsg->usb_amount_left,
+					(u32) mod_data.buflen);
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = bh->bulk_out_intended_length =
+					amount;
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			fsg->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		if ((rc = sleep_thread(fsg)) != 0)
+			return rc;
+	}
+	return 0;
+}
+
+
+static int finish_reply(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	int			rc = 0;
+
+	switch (fsg->data_dir) {
+	case DATA_DIR_NONE:
+		break;			// Nothing to send
+
+	/* If we don't know whether the host wants to read or write,
+	 * this must be CB or CBI with an unknown command.  We mustn't
+	 * try to send or receive any data.  So stall both bulk pipes
+	 * if we can and wait for a reset. */
+	case DATA_DIR_UNKNOWN:
+		if (mod_data.can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			rc = halt_bulk_in_endpoint(fsg);
+		}
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (fsg->data_size == 0)
+			;		// Nothing to send
+
+		/* If there's no residue, simply send the last buffer */
+		else if (fsg->residue == 0) {
+			bh->inreq->zero = 0;
+			start_transfer(fsg, fsg->bulk_in, bh->inreq,
+					&bh->inreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+		}
+
+		/* There is a residue.  For CB and CBI, simply mark the end
+		 * of the data with a short packet.  However, if we are
+		 * allowed to stall, there was no data at all (residue ==
+		 * data_size), and the command failed (invalid LUN or
+		 * sense data is set), then halt the bulk-in endpoint
+		 * instead. */
+		else if (!transport_is_bbb()) {
+			if (mod_data.can_stall &&
+					fsg->residue == fsg->data_size &&
+	(!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
+				bh->state = BUF_STATE_EMPTY;
+				rc = halt_bulk_in_endpoint(fsg);
+			} else {
+				bh->inreq->zero = 1;
+				start_transfer(fsg, fsg->bulk_in, bh->inreq,
+						&bh->inreq_busy, &bh->state);
+				fsg->next_buffhd_to_fill = bh->next;
+			}
+		}
+
+		/* For Bulk-only, if we're allowed to stall then send the
+		 * short packet and halt the bulk-in endpoint.  If we can't
+		 * stall, pad out the remaining data with 0's. */
+		else {
+			if (mod_data.can_stall) {
+				bh->inreq->zero = 1;
+				start_transfer(fsg, fsg->bulk_in, bh->inreq,
+						&bh->inreq_busy, &bh->state);
+				fsg->next_buffhd_to_fill = bh->next;
+				rc = halt_bulk_in_endpoint(fsg);
+			} else
+				rc = pad_with_zeros(fsg);
+		}
+		break;
+
+	/* We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests. */
+	case DATA_DIR_FROM_HOST:
+		if (fsg->residue == 0)
+			;		// Nothing to receive
+
+		/* Did the host stop sending unexpectedly early? */
+		else if (fsg->short_packet_received) {
+			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+		}
+
+		/* We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on. */
+#if 0
+		else if (mod_data.can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+		}
+#endif
+
+		/* We can't stall.  Read in the excess data and throw it
+		 * all away. */
+		else
+			rc = throw_away_data(fsg);
+		break;
+	}
+	return rc;
+}
+
+
+static int send_status(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u8			status = USB_STATUS_PASS;
+	u32			sd, sdinfo = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		if ((rc = sleep_thread(fsg)) != 0)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (fsg->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (fsg->phase_error) {
+		DBG(fsg, "sending phase-error status\n");
+		status = USB_STATUS_PHASE_ERROR;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(fsg, "sending command-failure status\n");
+		status = USB_STATUS_FAIL;
+		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	if (transport_is_bbb()) {
+		struct bulk_cs_wrap	*csw = (struct bulk_cs_wrap *) bh->buf;
+
+		/* Store and send the Bulk-only CSW */
+		csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
+		csw->Tag = fsg->tag;
+		csw->Residue = cpu_to_le32(fsg->residue);
+		csw->Status = status;
+
+		bh->inreq->length = USB_BULK_CS_WRAP_LEN;
+		bh->inreq->zero = 0;
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+
+	} else if (mod_data.transport_type == USB_PR_CB) {
+
+		/* Control-Bulk transport has no status phase! */
+		return 0;
+
+	} else {			// USB_PR_CBI
+		struct interrupt_data	*buf = (struct interrupt_data *)
+						bh->buf;
+
+		/* Store and send the Interrupt data.  UFI sends the ASC
+		 * and ASCQ bytes.  Everything else sends a Type (which
+		 * is always 0) and the status Value. */
+		if (mod_data.protocol_type == USB_SC_UFI) {
+			buf->bType = ASC(sd);
+			buf->bValue = ASCQ(sd);
+		} else {
+			buf->bType = 0;
+			buf->bValue = status;
+		}
+		fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
+
+		fsg->intr_buffhd = bh;		// Point to the right buffhd
+		fsg->intreq->buf = bh->inreq->buf;
+		fsg->intreq->dma = bh->inreq->dma;
+		fsg->intreq->context = bh;
+		start_transfer(fsg, fsg->intr_in, fsg->intreq,
+				&fsg->intreq_busy, &bh->state);
+	}
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have. */
+static int check_command(struct fsg_dev *fsg, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = fsg->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
+	char			hdlen[20];
+	struct lun		*curlun;
+
+	/* Adjust the expected cmnd_size for protocol encapsulation padding.
+	 * Transparent SCSI doesn't pad. */
+	if (protocol_is_scsi())
+		;
+
+	/* There's some disagreement as to whether RBC pads commands or not.
+	 * We'll play it safe and accept either form. */
+	else if (mod_data.protocol_type == USB_SC_RBC) {
+		if (fsg->cmnd_size == 12)
+			cmnd_size = 12;
+
+	/* All the other protocols pad to 12 bytes */
+	} else
+		cmnd_size = 12;
+
+	hdlen[0] = 0;
+	if (fsg->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
+				fsg->data_size);
+	VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+			name, cmnd_size, dirletter[(int) data_dir],
+			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
+
+	/* We can't reply at all until we know the correct data direction
+	 * and size. */
+	if (fsg->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	// CB or CBI
+		fsg->data_dir = data_dir;
+		fsg->data_size = fsg->data_size_from_cmnd;
+
+	} else {					// Bulk-only
+		if (fsg->data_size < fsg->data_size_from_cmnd) {
+
+			/* Host data size < Device data size is a phase error.
+			 * Carry out the command, but only transfer as much
+			 * as we are allowed. */
+			fsg->data_size_from_cmnd = fsg->data_size;
+			fsg->phase_error = 1;
+		}
+	}
+	fsg->residue = fsg->usb_amount_left = fsg->data_size;
+
+	/* Conflicting data directions is a phase error */
+	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
+		fsg->phase_error = 1;
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != fsg->cmnd_size) {
+
+		/* Special case workaround: MS-Windows issues REQUEST SENSE
+		 * with cbw->Length == 12 (it should be 6). */
+		if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
+			cmnd_size = fsg->cmnd_size;
+		else {
+			fsg->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (transport_is_bbb()) {
+		if (fsg->lun != lun)
+			DBG(fsg, "using LUN %d from CBW, "
+					"not LUN %d from CDB\n",
+					fsg->lun, lun);
+	} else
+		fsg->lun = lun;		// Use LUN from the command
+
+	/* Check the LUN */
+	if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
+		fsg->curlun = curlun = &fsg->luns[fsg->lun];
+		if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+		}
+	} else {
+		fsg->curlun = curlun = NULL;
+		fsg->bad_lun_okay = 0;
+
+		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not. */
+		if (fsg->cmnd[0] != SC_INQUIRY &&
+				fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
+			return -EINVAL;
+		}
+	}
+
+	/* If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+			fsg->cmnd[0] != SC_INQUIRY &&
+			fsg->cmnd[0] != SC_REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
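+	/* For example, the READ(10) case in do_scsi_command() passes a mask
+	 * of (1<<1) | (0xf<<2) | (3<<7): flag bits in byte 1, a four-byte
+	 * logical block address in bytes 2-5, and a two-byte transfer length
+	 * in bytes 7-8 may be non-zero; every other CDB byte past the
+	 * opcode must be zero. */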
+	fsg->cmnd[1] &= 0x1f;			// Mask away the LUN
+	for (i = 1; i < cmnd_size; ++i) {
+		if (fsg->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !backing_file_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static int do_scsi_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(fsg);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		if ((rc = sleep_thread(fsg)) != 0)
+			return rc;
+	}
+	fsg->phase_error = 0;
+	fsg->short_packet_received = 0;
+
+	down_read(&fsg->filesem);	// We're using the backing file
+	switch (fsg->cmnd[0]) {
+
+	case SC_INQUIRY:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"INQUIRY")) == 0)
+			reply = do_inquiry(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(1<<1) | (1<<4), 0,
+				"MODE SELECT(6)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (3<<7), 0,
+				"MODE SELECT(10)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (1<<4), 0,
+				"MODE SENSE(6)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (3<<7), 0,
+				"MODE SENSE(10)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<4), 0,
+				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
+			reply = do_prevent_allow(fsg);
+		break;
+
+	case SC_READ_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(7<<1) | (1<<4), 1,
+				"READ(6)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"READ(10)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"READ(12)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_CAPACITY:
+		fsg->data_size_from_cmnd = 8;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(0xf<<2) | (1<<8), 1,
+				"READ CAPACITY")) == 0)
+			reply = do_read_capacity(fsg, bh);
+		break;
+
+	case SC_READ_FORMAT_CAPACITIES:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7), 1,
+				"READ FORMAT CAPACITIES")) == 0)
+			reply = do_read_format_capacities(fsg, bh);
+		break;
+
+	case SC_REQUEST_SENSE:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"REQUEST SENSE")) == 0)
+			reply = do_request_sense(fsg, bh);
+		break;
+
+	case SC_START_STOP_UNIT:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<1) | (1<<4), 0,
+				"START-STOP UNIT")) == 0)
+			reply = do_start_stop(fsg);
+		break;
+
+	case SC_SYNCHRONIZE_CACHE:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(0xf<<2) | (3<<7), 1,
+				"SYNCHRONIZE CACHE")) == 0)
+			reply = do_synchronize_cache(fsg);
+		break;
+
+	case SC_TEST_UNIT_READY:
+		fsg->data_size_from_cmnd = 0;
+		reply = check_command(fsg, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/* Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0. */
+	case SC_VERIFY:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"VERIFY")) == 0)
+			reply = do_verify(fsg);
+		break;
+
+	case SC_WRITE_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(7<<1) | (1<<4), 1,
+				"WRITE(6)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"WRITE(10)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"WRITE(12)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	/* Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of Posix locks. */
+	case SC_FORMAT_UNIT:
+	case SC_RELEASE:
+	case SC_RESERVE:
+	case SC_SEND_DIAGNOSTIC:
+		// Fall through
+
+	default:
+		fsg->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
+		if ((reply = check_command(fsg, fsg->cmnd_size,
+				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+			fsg->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&fsg->filesem);
+
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		// Error reply length
+	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32) reply, fsg->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		fsg->residue -= reply;
+	}				// Otherwise it's already set
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = (struct bulk_cb_wrap *) req->buf;
+
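+	/* A Bulk-only CBW is a fixed 31-byte structure: a 32-bit signature,
+	 * a tag to be echoed back in the CSW, the expected transfer length,
+	 * a direction flag, the target LUN, the CDB length, and up to 16
+	 * bytes of CDB (per the Bulk-Only Transport spec). */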
+	/* Was this a real packet? */
+	if (req->status)
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != USB_BULK_CB_WRAP_LEN ||
+			cbw->Signature != __constant_cpu_to_le32(
+				USB_BULK_CB_SIG)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+
+		/* The Bulk-only spec says we MUST stall the bulk pipes!
+		 * If we want to avoid stalls, set a flag so that we will
+		 * clear the endpoint halts at the next reset. */
+		if (!mod_data.can_stall)
+			set_bit(CLEAR_BULK_HALTS, &fsg->atomic_bitflags);
+		fsg_set_halt(fsg, fsg->bulk_out);
+		halt_bulk_in_endpoint(fsg);
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+			cbw->Length < 6 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+
+		/* We can do anything we want here, so let's stall the
+		 * bulk pipes if we are allowed to. */
+		if (mod_data.can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			halt_bulk_in_endpoint(fsg);
+		}
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	fsg->cmnd_size = cbw->Length;
+	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
+	if (cbw->Flags & USB_BULK_IN_FLAG)
+		fsg->data_dir = DATA_DIR_TO_HOST;
+	else
+		fsg->data_dir = DATA_DIR_FROM_HOST;
+	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (fsg->data_size == 0)
+		fsg->data_dir = DATA_DIR_NONE;
+	fsg->lun = cbw->Lun;
+	fsg->tag = cbw->Tag;
+	return 0;
+}
+
+
+static int get_next_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	if (transport_is_bbb()) {
+
+		/* Wait for the next buffer to become available */
+		bh = fsg->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			if ((rc = sleep_thread(fsg)) != 0)
+				return rc;
+		}
+
+		/* Queue a request to read a Bulk-only CBW */
+		set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
+		start_transfer(fsg, fsg->bulk_out, bh->outreq,
+				&bh->outreq_busy, &bh->state);
+
+		/* We will drain the buffer in software, which means we
+		 * can reuse it for the next filling.  No need to advance
+		 * next_buffhd_to_fill. */
+
+		/* Wait for the CBW to arrive */
+		while (bh->state != BUF_STATE_FULL) {
+			if ((rc = sleep_thread(fsg)) != 0)
+				return rc;
+		}
+		rc = received_cbw(fsg, bh);
+		bh->state = BUF_STATE_EMPTY;
+
+	} else {		// USB_PR_CB or USB_PR_CBI
+
+		/* Wait for the next command to arrive */
+		while (fsg->cbbuf_cmnd_size == 0) {
+			if ((rc = sleep_thread(fsg)) != 0)
+				return rc;
+		}
+
+		/* Is the previous status interrupt request still busy?
+		 * The host is allowed to skip reading the status,
+		 * so we must cancel it. */
+		if (fsg->intreq_busy)
+			usb_ep_dequeue(fsg->intr_in, fsg->intreq);
+
+		/* Copy the command and mark the buffer empty */
+		fsg->data_dir = DATA_DIR_UNKNOWN;
+		spin_lock_irq(&fsg->lock);
+		fsg->cmnd_size = fsg->cbbuf_cmnd_size;
+		memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
+		fsg->cbbuf_cmnd_size = 0;
+		spin_unlock_irq(&fsg->lock);
+	}
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
+		const struct usb_endpoint_descriptor *d)
+{
+	int	rc;
+
+	ep->driver_data = fsg;
+	rc = usb_ep_enable(ep, d);
+	if (rc)
+		ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
+	return rc;
+}
+
+static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
+		struct usb_request **preq)
+{
+	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (*preq)
+		return 0;
+	ERROR(fsg, "can't allocate request for %s\n", ep->name);
+	return -ENOMEM;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface.  The only other
+ * available altsetting is 0, which enables the interface.
+ */
+static int do_set_interface(struct fsg_dev *fsg, int altsetting)
+{
+	int	rc = 0;
+	int	i;
+	const struct usb_endpoint_descriptor	*d;
+
+	if (fsg->running)
+		DBG(fsg, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd *bh = &fsg->buffhds[i];
+
+		if (bh->inreq) {
+			usb_ep_free_request(fsg->bulk_in, bh->inreq);
+			bh->inreq = NULL;
+		}
+		if (bh->outreq) {
+			usb_ep_free_request(fsg->bulk_out, bh->outreq);
+			bh->outreq = NULL;
+		}
+	}
+	if (fsg->intreq) {
+		usb_ep_free_request(fsg->intr_in, fsg->intreq);
+		fsg->intreq = NULL;
+	}
+
+	/* Disable the endpoints */
+	if (fsg->bulk_in_enabled) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+	}
+	if (fsg->bulk_out_enabled) {
+		usb_ep_disable(fsg->bulk_out);
+		fsg->bulk_out_enabled = 0;
+	}
+	if (fsg->intr_in_enabled) {
+		usb_ep_disable(fsg->intr_in);
+		fsg->intr_in_enabled = 0;
+	}
+
+	fsg->running = 0;
+	if (altsetting < 0 || rc != 0)
+		return rc;
+
+	DBG(fsg, "set interface %d\n", altsetting);
+
+	/* Enable the endpoints */
+	d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
+	if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
+		goto reset;
+	fsg->bulk_in_enabled = 1;
+
+	d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
+	if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
+		goto reset;
+	fsg->bulk_out_enabled = 1;
+	fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+
+	if (transport_is_cbi()) {
+		d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
+		if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
+			goto reset;
+		fsg->intr_in_enabled = 1;
+	}
+
+	/* Allocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
+			goto reset;
+		if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->dma = bh->outreq->dma = bh->dma;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+	if (transport_is_cbi()) {
+		if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
+			goto reset;
+		fsg->intreq->complete = intr_in_complete;
+	}
+
+	fsg->running = 1;
+	for (i = 0; i < fsg->nluns; ++i)
+		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+	return rc;
+}
+
+
+/*
+ * Change our operational configuration.  This code must agree with the code
+ * that returns config descriptors, and with interface altsetting code.
+ *
+ * It's also responsible for power management interactions.  Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_dev *fsg, u8 new_config)
+{
+	int	rc = 0;
+
+	/* Disable the single interface */
+	if (fsg->config != 0) {
+		DBG(fsg, "reset config\n");
+		fsg->config = 0;
+		rc = do_set_interface(fsg, -1);
+	}
+
+	/* Enable the interface */
+	if (new_config != 0) {
+		fsg->config = new_config;
+		if ((rc = do_set_interface(fsg, 0)) != 0)
+			fsg->config = 0;	// Reset on errors
+		else {
+			char *speed;
+
+			switch (fsg->gadget->speed) {
+			case USB_SPEED_LOW:	speed = "low";	break;
+			case USB_SPEED_FULL:	speed = "full";	break;
+			case USB_SPEED_HIGH:	speed = "high";	break;
+			default: 		speed = "?";	break;
+			}
+			INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
+		}
+	}
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void handle_exception(struct fsg_dev *fsg)
+{
+	siginfo_t		info;
+	int			sig;
+	int			i;
+	int			num_active;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	u8			new_config;
+	struct lun		*curlun;
+	unsigned int		exception_req_tag;
+	int			rc;
+
+	/* Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception. */
+	for (;;) {
+		sig = dequeue_signal_lock(current, &fsg->thread_signal_mask,
+				&info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (fsg->state < FSG_STATE_EXIT)
+				DBG(fsg, "Main thread exiting on signal\n");
+			raise_exception(fsg, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Cancel all the pending transfers */
+	if (fsg->intreq_busy)
+		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		bh = &fsg->buffhds[i];
+		if (bh->inreq_busy)
+			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
+		if (bh->outreq_busy)
+			usb_ep_dequeue(fsg->bulk_out, bh->outreq);
+	}
+
+	/* Wait until everything is idle */
+	for (;;) {
+		num_active = fsg->intreq_busy;
+		for (i = 0; i < NUM_BUFFERS; ++i) {
+			bh = &fsg->buffhds[i];
+			num_active += bh->inreq_busy + bh->outreq_busy;
+		}
+		if (num_active == 0)
+			break;
+		if (sleep_thread(fsg))
+			return;
+	}
+
+	/* Clear out the controller's fifos */
+	if (fsg->bulk_in_enabled)
+		usb_ep_fifo_flush(fsg->bulk_in);
+	if (fsg->bulk_out_enabled)
+		usb_ep_fifo_flush(fsg->bulk_out);
+	if (fsg->intr_in_enabled)
+		usb_ep_fifo_flush(fsg->intr_in);
+
+	/* Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler. */
+	spin_lock_irq(&fsg->lock);
+
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		bh = &fsg->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
+			&fsg->buffhds[0];
+
+	exception_req_tag = fsg->exception_req_tag;
+	new_config = fsg->new_config;
+	old_state = fsg->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		fsg->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < fsg->nluns; ++i) {
+			curlun = &fsg->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = curlun->unit_attention_data =
+					SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+		}
+		fsg->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irq(&fsg->lock);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	default:
+		break;
+
+	case FSG_STATE_ABORT_BULK_OUT:
+		send_status(fsg);
+		spin_lock_irq(&fsg->lock);
+		if (fsg->state == FSG_STATE_STATUS_PHASE)
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&fsg->lock);
+		break;
+
+	case FSG_STATE_RESET:
+		/* In case we were forced against our will to halt a
+		 * bulk endpoint, clear the halt now.  (The SuperH UDC
+		 * requires this.) */
+		if (test_and_clear_bit(CLEAR_BULK_HALTS,
+				&fsg->atomic_bitflags)) {
+			usb_ep_clear_halt(fsg->bulk_in);
+			usb_ep_clear_halt(fsg->bulk_out);
+		}
+
+		if (transport_is_bbb()) {
+			if (fsg->ep0_req_tag == exception_req_tag)
+				ep0_queue(fsg);	// Complete the status stage
+
+		} else if (transport_is_cbi())
+			send_status(fsg);	// Status by interrupt pipe
+
+		/* Technically this should go here, but it would only be
+		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
+		 * CONFIG_CHANGE cases. */
+		// for (i = 0; i < fsg->nluns; ++i)
+		//	fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+		break;
+
+	case FSG_STATE_INTERFACE_CHANGE:
+		rc = do_set_interface(fsg, 0);
+		if (fsg->ep0_req_tag != exception_req_tag)
+			break;
+		if (rc != 0)			// STALL on errors
+			fsg_set_halt(fsg, fsg->ep0);
+		else				// Complete the status stage
+			ep0_queue(fsg);
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		rc = do_set_config(fsg, new_config);
+		if (fsg->ep0_req_tag != exception_req_tag)
+			break;
+		if (rc != 0)			// STALL on errors
+			fsg_set_halt(fsg, fsg->ep0);
+		else				// Complete the status stage
+			ep0_queue(fsg);
+		break;
+
+	case FSG_STATE_DISCONNECT:
+		fsync_all(fsg);
+		do_set_config(fsg, 0);		// Unconfigured state
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_config(fsg, 0);			// Free resources
+		spin_lock_irq(&fsg->lock);
+		fsg->state = FSG_STATE_TERMINATED;	// Stop the thread
+		spin_unlock_irq(&fsg->lock);
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_main_thread(void *fsg_)
+{
+	struct fsg_dev		*fsg = (struct fsg_dev *) fsg_;
+
+	fsg->thread_task = current;
+
+	/* Release all our userspace resources */
+	daemonize("file-storage-gadget");
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	siginitsetinv(&fsg->thread_signal_mask, sigmask(SIGINT) |
+			sigmask(SIGTERM) | sigmask(SIGKILL) |
+			sigmask(SIGUSR1));
+	sigprocmask(SIG_SETMASK, &fsg->thread_signal_mask, NULL);
+
+	/* Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay. */
+	set_fs(get_ds());
+
+	/* Wait for the gadget registration to finish up */
+	wait_for_completion(&fsg->thread_notifier);
+
+	/* The main loop */
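+	/* Each pass through the loop handles one command cycle: fetch the
+	 * next command (a CBW for Bulk-only), run the data phase via
+	 * do_scsi_command()/finish_reply(), then send status (a CSW, or
+	 * interrupt data for CBI), dropping back to IDLE in between. */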
+	while (fsg->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(fsg) || signal_pending(current)) {
+			handle_exception(fsg);
+			continue;
+		}
+
+		if (!fsg->running) {
+			sleep_thread(fsg);
+			continue;
+		}
+
+		if (get_next_command(fsg))
+			continue;
+
+		spin_lock_irq(&fsg->lock);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irq(&fsg->lock);
+
+		if (do_scsi_command(fsg) || finish_reply(fsg))
+			continue;
+
+		spin_lock_irq(&fsg->lock);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irq(&fsg->lock);
+
+		if (send_status(fsg))
+			continue;
+
+		spin_lock_irq(&fsg->lock);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&fsg->lock);
+	}
+
+	fsg->thread_task = NULL;
+	flush_signals(current);
+
+	/* In case we are exiting because of a signal, unregister the
+	 * gadget driver and close the backing file. */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) {
+		usb_gadget_unregister_driver(&fsg_driver);
+		close_all_backing_files(fsg);
+	}
+
+	/* Let the unbind and cleanup routines know the thread has exited */
+	complete_and_exit(&fsg->thread_notifier, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing. */
+
+static int open_backing_file(struct lun *curlun, const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (-EROFS == PTR_ERR(filp))
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	if (filp->f_dentry)
+		inode = filp->f_dentry->d_inode;
+	if (inode && S_ISBLK(inode->i_mode)) {
+		if (bdev_read_only(inode->i_bdev))
+			ro = 1;
+	} else if (!inode || !S_ISREG(inode->i_mode)) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/* If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only. */
+	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+	num_sectors = size >> 9;	// File size in 512-byte sectors
+	if (num_sectors == 0) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	get_file(filp);
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	LDBG(curlun, "open backing file: %s\n", filename);
+	rc = 0;
+
+out:
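+	/* filp_close() balances the filp_open() above; on success the
+	 * get_file() call has already taken its own reference, which is
+	 * dropped later by fput() in close_backing_file(). */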
+	filp_close(filp, current->files);
+	return rc;
+}
+
+
+static void close_backing_file(struct lun *curlun)
+{
+	if (curlun->filp) {
+		LDBG(curlun, "close backing file\n");
+		fput(curlun->filp);
+		curlun->filp = NULL;
+	}
+}
+
+static void close_all_backing_files(struct fsg_dev *fsg)
+{
+	int	i;
+
+	for (i = 0; i < fsg->nluns; ++i)
+		close_backing_file(&fsg->luns[i]);
+}
+
+
+static ssize_t show_ro(struct device *dev, char *buf)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+
+	return sprintf(buf, "%d\n", curlun->ro);
+}
+
+static ssize_t show_file(struct device *dev, char *buf)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = (struct fsg_dev *) dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {	// Get the complete pathname
+		p = d_path(curlun->filp->f_dentry, curlun->filp->f_vfsmnt,
+				buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		// Add a newline
+			buf[++rc] = 0;
+		}
+	} else {				// No file, return 0 bytes
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(&fsg->filesem);
+	return rc;
+}
+
+
+static ssize_t store_ro(struct device *dev, const char *buf, size_t count)
+{
+	ssize_t		rc = count;
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = (struct fsg_dev *) dev_get_drvdata(dev);
+	int		i;
+
+	if (sscanf(buf, "%d", &i) != 1)
+		return -EINVAL;
+
+	/* Allow the write-enable status to change only while the backing file
+	 * is closed. */
+	down_read(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {
+		LDBG(curlun, "read-only status change prevented\n");
+		rc = -EBUSY;
+	} else {
+		curlun->ro = !!i;
+		LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+	}
+	up_read(&fsg->filesem);
+	return rc;
+}
+
+static ssize_t store_file(struct device *dev, const char *buf, size_t count)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = (struct fsg_dev *) dev_get_drvdata(dev);
+	int		rc = 0;
+
+	if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
+		LDBG(curlun, "eject attempt prevented\n");
+		return -EBUSY;				// "Door is locked"
+	}
+
+	/* Remove a trailing newline */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;		// Ugh!
+
+	/* Eject current medium */
+	down_write(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {
+		close_backing_file(curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+
+	/* Load new medium */
+	if (count > 0 && buf[0]) {
+		rc = open_backing_file(curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	}
+	up_write(&fsg->filesem);
+	return (rc < 0 ? rc : count);
+}
+
+
+/* The write permissions and store_xxx pointers are set in fsg_bind() */
+static DEVICE_ATTR(ro, 0444, show_ro, NULL);
+static DEVICE_ATTR(file, 0444, show_file, NULL);
+
+
+/*-------------------------------------------------------------------------*/
+
+static void lun_release(struct device *dev)
+{
+	struct fsg_dev	*fsg = (struct fsg_dev *) dev_get_drvdata(dev);
+
+	complete(&fsg->lun_released);
+}
+
+static void fsg_unbind(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+	int			i;
+	struct lun		*curlun;
+	struct usb_request	*req = fsg->ep0req;
+
+	DBG(fsg, "unbind\n");
+	clear_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Unregister the sysfs attribute files and the LUNs */
+	init_completion(&fsg->lun_released);
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (curlun->registered) {
+			device_remove_file(&curlun->dev, &dev_attr_ro);
+			device_remove_file(&curlun->dev, &dev_attr_file);
+			device_unregister(&curlun->dev);
+			wait_for_completion(&fsg->lun_released);
+			curlun->registered = 0;
+		}
+	}
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (fsg->state != FSG_STATE_TERMINATED) {
+		raise_exception(fsg, FSG_STATE_EXIT);
+		wait_for_completion(&fsg->thread_notifier);
+
+		/* The cleanup routine waits for this completion also */
+		complete(&fsg->thread_notifier);
+	}
+
+	/* Free the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		if (bh->buf)
+			usb_ep_free_buffer(fsg->bulk_in, bh->buf, bh->dma,
+					mod_data.buflen);
+	}
+
+	/* Free the request and buffer for endpoint 0 */
+	if (req) {
+		if (req->buf)
+			usb_ep_free_buffer(fsg->ep0, req->buf,
+					req->dma, EP0_BUFSIZE);
+		usb_ep_free_request(fsg->ep0, req);
+	}
+
+	set_gadget_data(gadget, NULL);
+}
+
+
+static int __init check_parameters(struct fsg_dev *fsg)
+{
+	int	prot;
+
+	/* Store the default values */
+	mod_data.transport_type = USB_PR_BULK;
+	mod_data.transport_name = "Bulk-only";
+	mod_data.protocol_type = USB_SC_SCSI;
+	mod_data.protocol_name = "Transparent SCSI";
+
+	if (gadget_is_sh(fsg->gadget))
+		mod_data.can_stall = 0;
+
+	if (mod_data.release == 0xffff) {	// Parameter wasn't set
+		if (gadget_is_net2280(fsg->gadget))
+			mod_data.release = 0x0301;
+		else if (gadget_is_dummy(fsg->gadget))
+			mod_data.release = 0x0302;
+		else if (gadget_is_pxa(fsg->gadget))
+			mod_data.release = 0x0303;
+		else if (gadget_is_sh(fsg->gadget))
+			mod_data.release = 0x0304;
+
+		/* The sa1100 controller is not supported */
+
+		else if (gadget_is_goku(fsg->gadget))
+			mod_data.release = 0x0306;
+		else if (gadget_is_mq11xx(fsg->gadget))
+			mod_data.release = 0x0307;
+		else if (gadget_is_omap(fsg->gadget))
+			mod_data.release = 0x0308;
+		else if (gadget_is_lh7a40x(fsg->gadget))
+			mod_data.release = 0x0309;
+		else if (gadget_is_n9604(fsg->gadget))
+			mod_data.release = 0x0310;
+		else if (gadget_is_pxa27x(fsg->gadget))
+			mod_data.release = 0x0311;
+		else if (gadget_is_s3c2410(fsg->gadget))
+			mod_data.release = 0x0312;
+		else if (gadget_is_at91(fsg->gadget))
+			mod_data.release = 0x0313;
+		else {
+			WARN(fsg, "controller '%s' not recognized\n",
+				fsg->gadget->name);
+			mod_data.release = 0x0399;
+		}
+	}
+
+	prot = simple_strtol(mod_data.protocol_parm, NULL, 0);
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+	if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
+		;		// Use default setting
+	} else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
+		mod_data.transport_type = USB_PR_CB;
+		mod_data.transport_name = "Control-Bulk";
+	} else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
+		mod_data.transport_type = USB_PR_CBI;
+		mod_data.transport_name = "Control-Bulk-Interrupt";
+	} else {
+		ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
+		return -EINVAL;
+	}
+
+	if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
+			prot == USB_SC_SCSI) {
+		;		// Use default setting
+	} else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
+			prot == USB_SC_RBC) {
+		mod_data.protocol_type = USB_SC_RBC;
+		mod_data.protocol_name = "RBC";
+	} else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
+			strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
+			prot == USB_SC_8020) {
+		mod_data.protocol_type = USB_SC_8020;
+		mod_data.protocol_name = "8020i (ATAPI)";
+	} else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
+			prot == USB_SC_QIC) {
+		mod_data.protocol_type = USB_SC_QIC;
+		mod_data.protocol_name = "QIC-157";
+	} else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
+			prot == USB_SC_UFI) {
+		mod_data.protocol_type = USB_SC_UFI;
+		mod_data.protocol_name = "UFI";
+	} else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
+			prot == USB_SC_8070) {
+		mod_data.protocol_type = USB_SC_8070;
+		mod_data.protocol_name = "8070i";
+	} else {
+		ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
+		return -EINVAL;
+	}
+
+	mod_data.buflen &= PAGE_CACHE_MASK;
+	if (mod_data.buflen <= 0) {
+		ERROR(fsg, "invalid buflen\n");
+		return -ETOOSMALL;
+	}
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+	return 0;
+}
+
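+/* Illustrative usage sketch (the module and parameter names below are
+ * assumptions based on the mod_data fields above; "transport" and
+ * "protocol" are only honored when CONFIG_USB_FILE_STORAGE_TEST is set):
+ *
+ *	modprobe g_file_storage file=/tmp/backing.img transport=BBB protocol=SCSI
+ */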
+
+static int __init fsg_bind(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	int			rc;
+	int			i;
+	struct lun		*curlun;
+	struct usb_ep		*ep;
+	struct usb_request	*req;
+	char			*pathbuf, *p;
+
+	fsg->gadget = gadget;
+	set_gadget_data(gadget, fsg);
+	fsg->ep0 = gadget->ep0;
+	fsg->ep0->driver_data = fsg;
+
+	if ((rc = check_parameters(fsg)) != 0)
+		goto out;
+
+	if (mod_data.removable) {	// Enable the store_xxx attributes
+		dev_attr_ro.attr.mode = dev_attr_file.attr.mode = 0644;
+		dev_attr_ro.store = store_ro;
+		dev_attr_file.store = store_file;
+	}
+
+	/* Find out how many LUNs there should be */
+	i = mod_data.nluns;
+	if (i == 0)
+		i = max(mod_data.num_filenames, 1);
+	if (i > MAX_LUNS) {
+		ERROR(fsg, "invalid number of LUNs: %d\n", i);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs. */
+	fsg->luns = kmalloc(i * sizeof(struct lun), GFP_KERNEL);
+	if (!fsg->luns) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	memset(fsg->luns, 0, i * sizeof(struct lun));
+	fsg->nluns = i;
+
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		curlun->ro = ro[i];
+		curlun->dev.parent = &gadget->dev;
+		curlun->dev.driver = &fsg_driver.driver;
+		dev_set_drvdata(&curlun->dev, fsg);
+		snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
+				"%s-lun%d", gadget->dev.bus_id, i);
+
+		if ((rc = device_register(&curlun->dev)) != 0)
+			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
+		else {
+			curlun->registered = 1;
+			curlun->dev.release = lun_release;
+			device_create_file(&curlun->dev, &dev_attr_ro);
+			device_create_file(&curlun->dev, &dev_attr_file);
+		}
+
+		if (file[i] && *file[i]) {
+			if ((rc = open_backing_file(curlun, file[i])) != 0)
+				goto out;
+		} else if (!mod_data.removable) {
+			ERROR(fsg, "no file given for LUN%d\n", i);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* Find all the endpoints we will use */
+	usb_ep_autoconfig_reset(gadget);
+	ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg;		// claim the endpoint
+	fsg->bulk_in = ep;
+
+	ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg;		// claim the endpoint
+	fsg->bulk_out = ep;
+
+	if (transport_is_cbi()) {
+		ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc);
+		if (!ep)
+			goto autoconf_fail;
+		ep->driver_data = fsg;		// claim the endpoint
+		fsg->intr_in = ep;
+	}
+
+	/* Fix up the descriptors */
+	device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
+	device_desc.idVendor = cpu_to_le16(mod_data.vendor);
+	device_desc.idProduct = cpu_to_le16(mod_data.product);
+	device_desc.bcdDevice = cpu_to_le16(mod_data.release);
+
+	i = (transport_is_cbi() ? 3 : 2);	// Number of endpoints
+	intf_desc.bNumEndpoints = i;
+	intf_desc.bInterfaceSubClass = mod_data.protocol_type;
+	intf_desc.bInterfaceProtocol = mod_data.transport_type;
+	fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+
+	/* Assume ep0 uses the same maxpacket value for both speeds */
+	dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
+
+	/* Assume that all endpoint addresses are the same for both speeds */
+	hs_bulk_in_desc.bEndpointAddress = fs_bulk_in_desc.bEndpointAddress;
+	hs_bulk_out_desc.bEndpointAddress = fs_bulk_out_desc.bEndpointAddress;
+	hs_intr_in_desc.bEndpointAddress = fs_intr_in_desc.bEndpointAddress;
+#endif
+
+	if (gadget->is_otg) {
+		otg_desc.bmAttributes |= USB_OTG_HNP;
+		config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	rc = -ENOMEM;
+
+	/* Allocate the request and buffer for endpoint 0 */
+	fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
+	if (!req)
+		goto out;
+	req->buf = usb_ep_alloc_buffer(fsg->ep0, EP0_BUFSIZE,
+			&req->dma, GFP_KERNEL);
+	if (!req->buf)
+		goto out;
+	req->complete = ep0_complete;
+
+	/* Allocate the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		bh->buf = usb_ep_alloc_buffer(fsg->bulk_in, mod_data.buflen,
+				&bh->dma, GFP_KERNEL);
+		if (!bh->buf)
+			goto out;
+		bh->next = bh + 1;
+	}
+	fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+
+	/* This should reflect the actual gadget power source */
+	usb_gadget_set_selfpowered(gadget);
+
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+			system_utsname.sysname, system_utsname.release,
+			gadget->name);
+
+	/* On a real device, serial[] would be loaded from permanent
+	 * storage.  We just encode it from the driver version string. */
+	for (i = 0; i < sizeof(serial) - 2; i += 2) {
+		unsigned char		c = DRIVER_VERSION[i / 2];
+
+		if (!c)
+			break;
+		sprintf(&serial[i], "%02X", c);
+	}
+
+	if ((rc = kernel_thread(fsg_main_thread, fsg, (CLONE_VM | CLONE_FS |
+			CLONE_FILES))) < 0)
+		goto out;
+	fsg->thread_pid = rc;
+
+	INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+	INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (backing_file_is_open(curlun)) {
+			p = NULL;
+			if (pathbuf) {
+				p = d_path(curlun->filp->f_dentry,
+					curlun->filp->f_vfsmnt,
+					pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = NULL;
+			}
+			LINFO(curlun, "ro=%d, file: %s\n",
+					curlun->ro, (p ? p : "(error)"));
+		}
+	}
+	kfree(pathbuf);
+
+	DBG(fsg, "transport=%s (x%02x)\n",
+			mod_data.transport_name, mod_data.transport_type);
+	DBG(fsg, "protocol=%s (x%02x)\n",
+			mod_data.protocol_name, mod_data.protocol_type);
+	DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
+			mod_data.vendor, mod_data.product, mod_data.release);
+	DBG(fsg, "removable=%d, stall=%d, buflen=%u\n",
+			mod_data.removable, mod_data.can_stall,
+			mod_data.buflen);
+	DBG(fsg, "I/O thread pid: %d\n", fsg->thread_pid);
+	return 0;
+
+autoconf_fail:
+	ERROR(fsg, "unable to autoconfigure all endpoints\n");
+	rc = -ENOTSUPP;
+
+out:
+	fsg->state = FSG_STATE_TERMINATED;	// The thread is dead
+	fsg_unbind(gadget);
+	close_all_backing_files(fsg);
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_suspend(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+
+	DBG(fsg, "suspend\n");
+	set_bit(SUSPENDED, &fsg->atomic_bitflags);
+}
+
+static void fsg_resume(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+
+	DBG(fsg, "resume\n");
+	clear_bit(SUSPENDED, &fsg->atomic_bitflags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver		fsg_driver = {
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	.speed		= USB_SPEED_HIGH,
+#else
+	.speed		= USB_SPEED_FULL,
+#endif
+	.function	= (char *) longname,
+	.bind		= fsg_bind,
+	.unbind		= fsg_unbind,
+	.disconnect	= fsg_disconnect,
+	.setup		= fsg_setup,
+	.suspend	= fsg_suspend,
+	.resume		= fsg_resume,
+
+	.driver		= {
+		.name		= (char *) shortname,
+		// .release = ...
+		// .suspend = ...
+		// .resume = ...
+	},
+};
+
+
+static int __init fsg_alloc(void)
+{
+	struct fsg_dev		*fsg;
+
+	fsg = kmalloc(sizeof *fsg, GFP_KERNEL);
+	if (!fsg)
+		return -ENOMEM;
+	memset(fsg, 0, sizeof *fsg);
+	spin_lock_init(&fsg->lock);
+	init_rwsem(&fsg->filesem);
+	init_waitqueue_head(&fsg->thread_wqh);
+	init_completion(&fsg->thread_notifier);
+
+	the_fsg = fsg;
+	return 0;
+}
+
+
+static void fsg_free(struct fsg_dev *fsg)
+{
+	kfree(fsg->luns);
+	kfree(fsg);
+}
+
+
+static int __init fsg_init(void)
+{
+	int		rc;
+	struct fsg_dev	*fsg;
+
+	if ((rc = fsg_alloc()) != 0)
+		return rc;
+	fsg = the_fsg;
+	if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0) {
+		fsg_free(fsg);
+		return rc;
+	}
+	set_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Tell the thread to start working */
+	complete(&fsg->thread_notifier);
+	return 0;
+}
+module_init(fsg_init);
+
+
+static void __exit fsg_cleanup(void)
+{
+	struct fsg_dev	*fsg = the_fsg;
+
+	/* Unregister the driver iff the thread hasn't already done so */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
+		usb_gadget_unregister_driver(&fsg_driver);
+
+	/* Wait for the thread to finish up */
+	wait_for_completion(&fsg->thread_notifier);
+
+	close_all_backing_files(fsg);
+	fsg_free(fsg);
+}
+module_exit(fsg_cleanup);
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
new file mode 100644
index 0000000..ea2eb52
--- /dev/null
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -0,0 +1,92 @@
+/*
+ * USB device controllers have lots of quirks.  Use these macros in
+ * gadget drivers or other code that needs to deal with them, and which
+ * autoconfigures instead of using early binding to the hardware.
+ *
+ * This could eventually work like the ARM mach_is_*() stuff, driven by
+ * some config file that gets updated as new hardware is supported.
+ *
+ * NOTE:  some of these controller drivers may not be available yet.
+ */
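+
+/* Illustrative use: gadget drivers key controller-specific quirks off
+ * these tests; for instance the file_storage driver above disables
+ * bulk-endpoint stalls on the SuperH UDC:
+ *
+ *	if (gadget_is_sh(fsg->gadget))
+ *		mod_data.can_stall = 0;
+ */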
+#ifdef CONFIG_USB_GADGET_NET2280
+#define	gadget_is_net2280(g)	!strcmp("net2280", (g)->name)
+#else
+#define	gadget_is_net2280(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_DUMMY_HCD
+#define	gadget_is_dummy(g)	!strcmp("dummy_udc", (g)->name)
+#else
+#define	gadget_is_dummy(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_PXA2XX
+#define	gadget_is_pxa(g)	!strcmp("pxa2xx_udc", (g)->name)
+#else
+#define	gadget_is_pxa(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_GOKU
+#define	gadget_is_goku(g)	!strcmp("goku_udc", (g)->name)
+#else
+#define	gadget_is_goku(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_SUPERH
+#define	gadget_is_sh(g)		!strcmp("sh_udc", (g)->name)
+#else
+#define	gadget_is_sh(g)		0
+#endif
+
+#ifdef CONFIG_USB_GADGET_SA1100
+#define	gadget_is_sa1100(g)	!strcmp("sa1100_udc", (g)->name)
+#else
+#define	gadget_is_sa1100(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_LH7A40X
+#define	gadget_is_lh7a40x(g)	!strcmp("lh7a40x_udc", (g)->name)
+#else
+#define	gadget_is_lh7a40x(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MQ11XX
+#define	gadget_is_mq11xx(g)	!strcmp("mq11xx_udc", (g)->name)
+#else
+#define	gadget_is_mq11xx(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_OMAP
+#define	gadget_is_omap(g)	!strcmp("omap_udc", (g)->name)
+#else
+#define	gadget_is_omap(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_N9604
+#define	gadget_is_n9604(g)	!strcmp("n9604_udc", (g)->name)
+#else
+#define	gadget_is_n9604(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_PXA27X
+#define	gadget_is_pxa27x(g)	!strcmp("pxa27x_udc", (g)->name)
+#else
+#define	gadget_is_pxa27x(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_S3C2410
+#define gadget_is_s3c2410(g)    !strcmp("s3c2410_udc", (g)->name)
+#else
+#define gadget_is_s3c2410(g)    0
+#endif
+
+#ifdef CONFIG_USB_GADGET_AT91
+#define gadget_is_at91(g)	!strcmp("at91_udc", (g)->name)
+#else
+#define gadget_is_at91(g)	0
+#endif
+
+// CONFIG_USB_GADGET_SX2
+// CONFIG_USB_GADGET_AU1X00
+// ...
+
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
new file mode 100644
index 0000000..005db7c
--- /dev/null
+++ b/drivers/usb/gadget/goku_udc.c
@@ -0,0 +1,1984 @@
+/*
+ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
+ *
+ * Copyright (C) 2000-2002 Lineo
+ *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
+ * Copyright (C) 2002 Toshiba Corporation
+ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
+ *
+ *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
+ *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
+ *  - Gadget drivers can choose direction (IN, OUT)
+ *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
+ */
+
+#undef DEBUG
+// #define	VERBOSE		/* extra debug messages (success too) */
+// #define	USB_TRACE	/* packet-level success messages */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+
+#include "goku_udc.h"
+
+#define	DRIVER_DESC		"TC86C001 USB Device Controller"
+#define	DRIVER_VERSION		"30-Oct 2003"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+static const char driver_name [] = "goku_udc";
+static const char driver_desc [] = DRIVER_DESC;
+
+MODULE_AUTHOR("source@mvista.com");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+
+/*
+ * IN dma behaves ok under testing, though the IN-dma abort paths don't
+ * seem to behave quite as expected.  Used by default.
+ *
+ * OUT dma documents design problems handling the common "short packet"
+ * transfer termination policy; it couldn't be enabled by default, even
+ * if the OUT-dma abort problems had a resolution.
+ */
+static unsigned use_dma = 1;
+
+#if 0
+//#include <linux/moduleparam.h>
+/* "modprobe goku_udc use_dma=1" etc
+ *	0 to disable dma
+ *	1 to use IN dma only (normal operation)
+ *	2 to use IN and OUT dma
+ */
+module_param(use_dma, uint, S_IRUGO);
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static void nuke(struct goku_ep *, int status);
+
+static inline void
+command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
+{
+	writel(COMMAND_EP(epnum) | command, &regs->Command);
+	udelay(300);
+}
+
+static int
+goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct goku_udc	*dev;
+	struct goku_ep	*ep;
+	u32		mode;
+	u16		max;
+	unsigned long	flags;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dev = ep->dev;
+	if (ep == &dev->ep[0])
+		return -EINVAL;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+	if (ep->num != (desc->bEndpointAddress & 0x0f))
+		return -EINVAL;
+
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
+			!= EPxSTATUS_EP_INVALID)
+		return -EBUSY;
+
+	/* enabling the no-toggle interrupt mode would need an api hook */
+	mode = 0;
+	max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
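+	/* The fall-throughs below encode maxpacket as 8 -> 0, 16 -> 1,
+	 * 32 -> 2, 64 -> 3; the final shift leaves that code in bits 4:3
+	 * of "mode". */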
+	switch (max) {
+	case 64:	mode++;
+	case 32:	mode++;
+	case 16:	mode++;
+	case 8:		mode <<= 3;
+			break;
+	default:
+		return -EINVAL;
+	}
+	mode |= 2 << 1;		/* bulk, or intr-with-toggle */
+
+	/* ep1/ep2 dma direction is chosen early; it works in the other
+	 * direction, with pio.  be cautious with out-dma.
+	 */
+	ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
+	if (ep->is_in) {
+		mode |= 1;
+		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
+	} else {
+		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
+		if (ep->dma)
+			DBG(dev, "%s out-dma hides short packets\n",
+				ep->ep.name);
+	}
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+
+	/* ep1 and ep2 can do double buffering and/or dma */
+	if (ep->num < 3) {
+		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+		u32				tmp;
+
+		/* double buffer except (for now) with pio in */
+		tmp = ((ep->dma || !ep->is_in)
+				? 0x10	/* double buffered */
+				: 0x11	/* single buffer */
+			) << ep->num;
+		tmp |= readl(&regs->EPxSingle);
+		writel(tmp, &regs->EPxSingle);
+
+		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
+		tmp |= readl(&regs->EPxBCS);
+		writel(tmp, &regs->EPxBCS);
+	}
+	writel(mode, ep->reg_mode);
+	command(ep->dev->regs, COMMAND_RESET, ep->num);
+	ep->ep.maxpacket = max;
+	ep->stopped = 0;
+	ep->desc = desc;
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
+		ep->is_in ? "IN" : "OUT",
+		ep->dma ? "dma" : "pio",
+		max);
+
+	return 0;
+}
+
+static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
+{
+	struct goku_udc		*dev = ep->dev;
+
+	if (regs) {
+		command(regs, COMMAND_INVALID, ep->num);
+		if (ep->num) {
+			if (ep->num == UDC_MSTWR_ENDPOINT)
+				dev->int_enable &= ~(INT_MSTWREND
+							|INT_MSTWRTMOUT);
+			else if (ep->num == UDC_MSTRD_ENDPOINT)
+				dev->int_enable &= ~INT_MSTRDEND;
+			dev->int_enable &= ~INT_EPxDATASET (ep->num);
+		} else
+			dev->int_enable &= ~INT_EP0;
+		writel(dev->int_enable, &regs->int_enable);
+		readl(&regs->int_enable);
+		if (ep->num < 3) {
+			struct goku_udc_regs __iomem	*r = ep->dev->regs;
+			u32				tmp;
+
+			tmp = readl(&r->EPxSingle);
+			tmp &= ~(0x11 << ep->num);
+			writel(tmp, &r->EPxSingle);
+
+			tmp = readl(&r->EPxBCS);
+			tmp &= ~(0x11 << ep->num);
+			writel(tmp, &r->EPxBCS);
+		}
+		/* reset dma in case we're still using it */
+		if (ep->dma) {
+			u32	master;
+
+			master = readl(&regs->dma_master) & MST_RW_BITS;
+			if (ep->num == UDC_MSTWR_ENDPOINT) {
+				master &= ~MST_W_BITS;
+				master |= MST_WR_RESET;
+			} else {
+				master &= ~MST_R_BITS;
+				master |= MST_RD_RESET;
+			}
+			writel(master, &regs->dma_master);
+		}
+	}
+
+	ep->ep.maxpacket = MAX_FIFO_SIZE;
+	ep->desc = NULL;
+	ep->stopped = 1;
+	ep->irqs = 0;
+	ep->dma = 0;
+}
+
+static int goku_ep_disable(struct usb_ep *_ep)
+{
+	struct goku_ep	*ep;
+	struct goku_udc	*dev;
+	unsigned long	flags;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep || !ep->desc)
+		return -ENODEV;
+	dev = ep->dev;
+	if (dev->ep0state == EP0_SUSPEND)
+		return -EBUSY;
+
+	VDBG(dev, "disable %s\n", _ep->name);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	nuke(ep, -ESHUTDOWN);
+	ep_reset(dev->regs, ep);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+goku_alloc_request(struct usb_ep *_ep, int gfp_flags)
+{
+	struct goku_request	*req;
+
+	if (!_ep)
+		return NULL;
+	req = kmalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	memset(req, 0, sizeof *req);
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+	return &req->req;
+}
+
+static void
+goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct goku_request	*req;
+
+	if (!_ep || !_req)
+		return;
+
+	req = container_of(_req, struct goku_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#undef USE_KMALLOC
+
+/* many common platforms have dma-coherent caches, which means that it's
+ * safe to use kmalloc() memory for all i/o buffers without using any
+ * cache flushing calls.  (unless you're trying to share cache lines
+ * between dma and non-dma activities, which is a bad idea in any case.)
+ *
+ * other platforms need more care, with 2.6 having a moderately general
+ * solution except for the common "buffer is smaller than a page" case.
+ */
+#if	defined(CONFIG_X86)
+#define USE_KMALLOC
+
+#elif	defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
+#define USE_KMALLOC
+
+#elif	defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+#define USE_KMALLOC
+
+#endif
+
+/* allocating buffers this way eliminates dma mapping overhead, which
+ * on some platforms will mean eliminating a per-io buffer copy.  with
+ * some kinds of system caches, further tweaks may still be needed.
+ */
+static void *
+goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
+			dma_addr_t *dma, int  gfp_flags)
+{
+	void		*retval;
+	struct goku_ep	*ep;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep)
+		return NULL;
+	*dma = DMA_ADDR_INVALID;
+
+#if	defined(USE_KMALLOC)
+	retval = kmalloc(bytes, gfp_flags);
+	if (retval)
+		*dma = virt_to_phys(retval);
+#else
+	if (ep->dma) {
+		/* the main problem with this call is that it wastes memory
+		 * on typical 1/N page allocations: it allocates 1-N pages.
+		 */
+#warning Using dma_alloc_coherent even with buffers smaller than a page.
+		retval = dma_alloc_coherent(&ep->dev->pdev->dev,
+				bytes, dma, gfp_flags);
+	} else
+		retval = kmalloc(bytes, gfp_flags);
+#endif
+	return retval;
+}
+
+static void
+goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
+{
+	/* free memory into the right allocator */
+#ifndef	USE_KMALLOC
+	if (dma != DMA_ADDR_INVALID) {
+		struct goku_ep	*ep;
+
+		ep = container_of(_ep, struct goku_ep, ep);
+		if (!_ep)
+			return;
+		dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
+	} else
+#endif
+		kfree (buf);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+done(struct goku_ep *ep, struct goku_request *req, int status)
+{
+	struct goku_udc		*dev;
+	unsigned		stopped = ep->stopped;
+
+	list_del_init(&req->queue);
+
+	if (likely(req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+	if (req->mapped) {
+		pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
+			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	}
+
+#ifndef USB_TRACE
+	if (status && status != -ESHUTDOWN)
+#endif
+		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&dev->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&dev->lock);
+	ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
+{
+	unsigned	length, count;
+
+	length = min(req->req.length - req->req.actual, max);
+	req->req.actual += length;
+
+	count = length;
+	while (likely(count--))
+		writel(*buf++, fifo);
+	return length;
+}
+
+// return:  0 = still running, 1 = completed, negative = errno
+static int write_fifo(struct goku_ep *ep, struct goku_request *req)
+{
+	struct goku_udc	*dev = ep->dev;
+	u32		tmp;
+	u8		*buf;
+	unsigned	count;
+	int		is_last;
+
+	tmp = readl(&dev->regs->DataSet);
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	dev = ep->dev;
+	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
+		return -EL2HLT;
+
+	/* NOTE:  just single-buffered PIO-IN for now.  */
+	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
+		return 0;
+
+	/* clear our "packet available" irq */
+	if (ep->num != 0)
+		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
+
+	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
+
+	/* last packet often short (sometimes a zlp, especially on ep0) */
+	if (unlikely(count != ep->ep.maxpacket)) {
+		writel(~(1<<ep->num), &dev->regs->EOP);
+		if (ep->num == 0) {
+			dev->ep[0].stopped = 1;
+			dev->ep0state = EP0_STATUS;
+		}
+		is_last = 1;
+	} else {
+		if (likely(req->req.length != req->req.actual)
+				|| req->req.zero)
+			is_last = 0;
+		else
+			is_last = 1;
+	}
+#if 0		/* printk seemed to trash is_last...*/
+//#ifdef USB_TRACE
+	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
+		ep->ep.name, count, is_last ? "/last" : "",
+		req->req.length - req->req.actual, req);
+#endif
+
+	/* requests complete when all IN data is in the FIFO,
+	 * or sometimes later, if a zlp was needed.
+	 */
+	if (is_last) {
+		done(ep, req, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int read_fifo(struct goku_ep *ep, struct goku_request *req)
+{
+	struct goku_udc_regs __iomem	*regs;
+	u32				size, set;
+	u8				*buf;
+	unsigned			bufferspace, is_short, dbuff;
+
+	regs = ep->dev->regs;
+top:
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+
+	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
+		return -EL2HLT;
+
+	dbuff = (ep->num == 1 || ep->num == 2);
+	do {
+		/* ack dataset irq matching the status we'll handle */
+		if (ep->num != 0)
+			writel(~INT_EPxDATASET(ep->num), &regs->int_status);
+
+		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
+		size = readl(&regs->EPxSizeLA[ep->num]);
+		bufferspace = req->req.length - req->req.actual;
+
+		/* usually do nothing without an OUT packet */
+		if (likely(ep->num != 0 || bufferspace != 0)) {
+			if (unlikely(set == 0))
+				break;
+			/* use ep1/ep2 double-buffering for OUT */
+			if (!(size & PACKET_ACTIVE))
+				size = readl(&regs->EPxSizeLB[ep->num]);
+			if (!(size & PACKET_ACTIVE)) 	// "can't happen"
+				break;
+			size &= DATASIZE;	/* EPxSizeH == 0 */
+
+		/* ep0out no-out-data case for set_config, etc */
+		} else
+			size = 0;
+
+		/* read all bytes from this packet */
+		req->req.actual += size;
+		is_short = (size < ep->ep.maxpacket);
+#ifdef USB_TRACE
+		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
+			ep->ep.name, size, is_short ? "/S" : "",
+			req, req->req.actual, req->req.length);
+#endif
+		while (likely(size-- != 0)) {
+			u8	byte = (u8) readl(ep->reg_fifo);
+
+			if (unlikely(bufferspace == 0)) {
+				/* this happens when the driver's buffer
+				 * is smaller than what the host sent.
+				 * discard the extra data in this packet.
+				 */
+				if (req->req.status != -EOVERFLOW)
+					DBG(ep->dev, "%s overflow %u\n",
+						ep->ep.name, size);
+				req->req.status = -EOVERFLOW;
+			} else {
+				*buf++ = byte;
+				bufferspace--;
+			}
+		}
+
+		/* completion */
+		if (unlikely(is_short || req->req.actual == req->req.length)) {
+			if (unlikely(ep->num == 0)) {
+				/* non-control endpoints now usable? */
+				if (ep->dev->req_config)
+					writel(ep->dev->configured
+							? USBSTATE_CONFIGURED
+							: 0,
+						&regs->UsbState);
+				/* ep0out status stage */
+				writel(~(1<<0), &regs->EOP);
+				ep->stopped = 1;
+				ep->dev->ep0state = EP0_STATUS;
+			}
+			done(ep, req, 0);
+
+			/* empty the second buffer asap */
+			if (dbuff && !list_empty(&ep->queue)) {
+				req = list_entry(ep->queue.next,
+						struct goku_request, queue);
+				goto top;
+			}
+			return 1;
+		}
+	} while (dbuff);
+	return 0;
+}
+
+static inline void
+pio_irq_enable(struct goku_udc *dev,
+		struct goku_udc_regs __iomem *regs, int epnum)
+{
+	dev->int_enable |= INT_EPxDATASET (epnum);
+	writel(dev->int_enable, &regs->int_enable);
+	/* write may still be posted */
+}
+
+static inline void
+pio_irq_disable(struct goku_udc *dev,
+		struct goku_udc_regs __iomem *regs, int epnum)
+{
+	dev->int_enable &= ~INT_EPxDATASET (epnum);
+	writel(dev->int_enable, &regs->int_enable);
+	/* write may still be posted */
+}
+
+static inline void
+pio_advance(struct goku_ep *ep)
+{
+	struct goku_request	*req;
+
+	if (unlikely(list_empty (&ep->queue)))
+		return;
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+	(ep->is_in ? write_fifo : read_fifo)(ep, req);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+// return:  0 = q running, 1 = q stopped, negative = errno
+static int start_dma(struct goku_ep *ep, struct goku_request *req)
+{
+	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+	u32				master;
+	u32				start = req->req.dma;
+	u32				end = start + req->req.length - 1;
+
+	master = readl(&regs->dma_master) & MST_RW_BITS;
+
+	/* re-init the bits affecting IN dma; careful with zlps */
+	if (likely(ep->is_in)) {
+		if (unlikely(master & MST_RD_ENA)) {
+			DBG (ep->dev, "start, IN active dma %03x!!\n",
+				master);
+//			return -EL2HLT;
+		}
+		writel(end, &regs->in_dma_end);
+		writel(start, &regs->in_dma_start);
+
+		master &= ~MST_R_BITS;
+		if (unlikely(req->req.length == 0))
+			master = MST_RD_ENA | MST_RD_EOPB;
+		else if ((req->req.length % ep->ep.maxpacket) != 0
+					|| req->req.zero)
+			master = MST_RD_ENA | MST_EOPB_ENA;
+		else
+			master = MST_RD_ENA | MST_EOPB_DIS;
+
+		ep->dev->int_enable |= INT_MSTRDEND;
+
+	/* Goku DMA-OUT merges short packets, which plays poorly with
+	 * protocols where short packets mark the transfer boundaries.
+	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
+	 * ending transfers after 3 SOFs; we don't turn it on.
+	 */
+	} else {
+		if (unlikely(master & MST_WR_ENA)) {
+			DBG (ep->dev, "start, OUT active dma %03x!!\n",
+				master);
+//			return -EL2HLT;
+		}
+		writel(end, &regs->out_dma_end);
+		writel(start, &regs->out_dma_start);
+
+		master &= ~MST_W_BITS;
+		master |= MST_WR_ENA | MST_TIMEOUT_DIS;
+
+		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
+	}
+
+	writel(master, &regs->dma_master);
+	writel(ep->dev->int_enable, &regs->int_enable);
+	return 0;
+}
+
+static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
+{
+	struct goku_request		*req;
+	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+	u32				master;
+
+	master = readl(&regs->dma_master);
+
+	if (unlikely(list_empty(&ep->queue))) {
+stop:
+		if (ep->is_in)
+			dev->int_enable &= ~INT_MSTRDEND;
+		else
+			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
+		writel(dev->int_enable, &regs->int_enable);
+		return;
+	}
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+
+	/* normal hw dma completion (not abort) */
+	if (likely(ep->is_in)) {
+		if (unlikely(master & MST_RD_ENA))
+			return;
+		req->req.actual = readl(&regs->in_dma_current);
+	} else {
+		if (unlikely(master & MST_WR_ENA))
+			return;
+
+		/* hardware merges short packets, and also hides packet
+		 * overruns.  a partial packet MAY be in the fifo here.
+		 */
+		req->req.actual = readl(&regs->out_dma_current);
+	}
+	req->req.actual -= req->req.dma;
+	req->req.actual++;
+
+#ifdef USB_TRACE
+	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
+		ep->ep.name, ep->is_in ? "IN" : "OUT",
+		req->req.actual, req->req.length, req);
+#endif
+	done(ep, req, 0);
+	if (list_empty(&ep->queue))
+		goto stop;
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+	(void) start_dma(ep, req);
+}
+
+static void abort_dma(struct goku_ep *ep, int status)
+{
+	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+	struct goku_request		*req;
+	u32				curr, master;
+
+	/* NAK future host requests, hoping the implicit delay lets the
+	 * dma engine finish reading (or writing) its latest packet and
+	 * empty the dma buffer (up to 16 bytes).
+	 *
+	 * This avoids needing to clean up a partial packet in the fifo;
+	 * we can't do that for IN without side effects to HALT and TOGGLE.
+	 */
+	command(regs, COMMAND_FIFO_DISABLE, ep->num);
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+	master = readl(&regs->dma_master) & MST_RW_BITS;
+
+	/* FIXME using these resets isn't usably documented. this may
+	 * not work unless it's followed by disabling the endpoint.
+	 *
+	 * FIXME the OUT reset path doesn't even behave consistently.
+	 */
+	if (ep->is_in) {
+		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
+			goto finished;
+		curr = readl(&regs->in_dma_current);
+
+		writel(curr, &regs->in_dma_end);
+		writel(curr, &regs->in_dma_start);
+
+		master &= ~MST_R_BITS;
+		master |= MST_RD_RESET;
+		writel(master, &regs->dma_master);
+
+		if (readl(&regs->dma_master) & MST_RD_ENA)
+			DBG(ep->dev, "IN dma active after reset!\n");
+
+	} else {
+		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
+			goto finished;
+		curr = readl(&regs->out_dma_current);
+
+		writel(curr, &regs->out_dma_end);
+		writel(curr, &regs->out_dma_start);
+
+		master &= ~MST_W_BITS;
+		master |= MST_WR_RESET;
+		writel(master, &regs->dma_master);
+
+		if (readl(&regs->dma_master) & MST_WR_ENA)
+			DBG(ep->dev, "OUT dma active after reset!\n");
+	}
+	req->req.actual = (curr - req->req.dma) + 1;
+	req->req.status = status;
+
+	VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
+		ep->is_in ? "IN" : "OUT",
+		req->req.actual, req->req.length);
+
+	command(regs, COMMAND_FIFO_ENABLE, ep->num);
+
+	return;
+
+finished:
+	/* dma already completed; no abort needed */
+	command(regs, COMMAND_FIFO_ENABLE, ep->num);
+	req->req.actual = req->req.length;
+	req->req.status = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+goku_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
+{
+	struct goku_request	*req;
+	struct goku_ep		*ep;
+	struct goku_udc		*dev;
+	unsigned long		flags;
+	int			status;
+
+	/* always require a cpu-view buffer so pio works */
+	req = container_of(_req, struct goku_request, req);
+	if (unlikely(!_req || !_req->complete
+			|| !_req->buf || !list_empty(&req->queue)))
+		return -EINVAL;
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
+		return -EINVAL;
+	dev = ep->dev;
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	/* can't touch registers when suspended */
+	if (dev->ep0state == EP0_SUSPEND)
+		return -EBUSY;
+
+	/* set up dma mapping in case the caller didn't */
+	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
+		_req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
+			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+		req->mapped = 1;
+	}
+
+#ifdef USB_TRACE
+	VDBG(dev, "%s queue req %p, len %u buf %p\n",
+			_ep->name, _req, _req->length, _req->buf);
+#endif
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* for ep0 IN without premature status, zlp is required and
+	 * writing EOP starts the status stage (OUT).
+	 */
+	if (unlikely(ep->num == 0 && ep->is_in))
+		_req->zero = 1;
+
+	/* kickstart this i/o queue? */
+	status = 0;
+	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
+		/* dma:  done after dma completion IRQ (or error)
+		 * pio:  done after last fifo operation
+		 */
+		if (ep->dma)
+			status = start_dma(ep, req);
+		else
+			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
+
+		if (unlikely(status != 0)) {
+			if (status > 0)
+				status = 0;
+			req = NULL;
+		}
+
+	} /* else pio or dma irq handler advances the queue. */
+
+	if (likely(req != 0))
+		list_add_tail(&req->queue, &ep->queue);
+
+	if (likely(!list_empty(&ep->queue))
+			&& likely(ep->num != 0)
+			&& !ep->dma
+			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
+		pio_irq_enable(dev, dev->regs, ep->num);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* pci writes may still be posted */
+	return status;
+}
+
+/* dequeue ALL requests */
+static void nuke(struct goku_ep *ep, int status)
+{
+	struct goku_request	*req;
+
+	ep->stopped = 1;
+	if (list_empty(&ep->queue))
+		return;
+	if (ep->dma)
+		abort_dma(ep, status);
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct goku_request, queue);
+		done(ep, req, status);
+	}
+}
+
+/* dequeue JUST ONE request */
+static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct goku_request	*req;
+	struct goku_ep		*ep;
+	struct goku_udc		*dev;
+	unsigned long		flags;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep || !_req || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver)
+		return -ESHUTDOWN;
+
+	/* we can't touch (dma) registers when suspended */
+	if (dev->ep0state == EP0_SUSPEND)
+		return -EBUSY;
+
+	VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
+		ep->is_in ? "IN" : "OUT",
+		ep->dma ? "dma" : "pio",
+		_req);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore (&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
+		abort_dma(ep, -ECONNRESET);
+		done(ep, req, -ECONNRESET);
+		dma_advance(dev, ep);
+	} else if (!list_empty(&req->queue))
+		done(ep, req, -ECONNRESET);
+	else
+		req = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return req ? 0 : -EOPNOTSUPP;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void goku_clear_halt(struct goku_ep *ep)
+{
+	// assert (ep->num !=0)
+	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
+	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
+	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
+	if (ep->stopped) {
+		ep->stopped = 0;
+		if (ep->dma) {
+			struct goku_request	*req;
+
+			if (list_empty(&ep->queue))
+				return;
+			req = list_entry(ep->queue.next, struct goku_request,
+						queue);
+			(void) start_dma(ep, req);
+		} else
+			pio_advance(ep);
+	}
+}
+
+static int goku_set_halt(struct usb_ep *_ep, int value)
+{
+	struct goku_ep	*ep;
+	unsigned long	flags;
+	int		retval = 0;
+
+	if (!_ep)
+		return -ENODEV;
+	ep = container_of (_ep, struct goku_ep, ep);
+
+	if (ep->num == 0) {
+		if (value) {
+			ep->dev->ep0state = EP0_STALL;
+			ep->dev->ep[0].stopped = 1;
+		} else
+			return -EINVAL;
+
+	/* don't change EPxSTATUS_EP_INVALID to READY */
+	} else if (!ep->desc) {
+		DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	if (!list_empty(&ep->queue))
+		retval = -EAGAIN;
+	else if (ep->is_in && value
+			/* data in (either) packet buffer? */
+			&& (readl(&ep->dev->regs->DataSet)
+					& DATASET_AB(ep->num)))
+		retval = -EAGAIN;
+	else if (!value)
+		goku_clear_halt(ep);
+	else {
+		ep->stopped = 1;
+		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
+		command(ep->dev->regs, COMMAND_STALL, ep->num);
+		readl(ep->reg_status);
+	}
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return retval;
+}
+
+static int goku_fifo_status(struct usb_ep *_ep)
+{
+	struct goku_ep			*ep;
+	struct goku_udc_regs __iomem	*regs;
+	u32				size;
+
+	if (!_ep)
+		return -ENODEV;
+	ep = container_of(_ep, struct goku_ep, ep);
+
+	/* size is only reported sanely for OUT */
+	if (ep->is_in)
+		return -EOPNOTSUPP;
+
+	/* ignores 16-byte dma buffer; SizeH == 0 */
+	regs = ep->dev->regs;
+	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
+	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
+	VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
+	return size;
+}
+
+static void goku_fifo_flush(struct usb_ep *_ep)
+{
+	struct goku_ep			*ep;
+	struct goku_udc_regs __iomem	*regs;
+	u32				size;
+
+	if (!_ep)
+		return;
+	ep = container_of(_ep, struct goku_ep, ep);
+	VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);
+
+	/* don't change EPxSTATUS_EP_INVALID to READY */
+	if (!ep->desc && ep->num != 0) {
+		DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
+		return;
+	}
+
+	regs = ep->dev->regs;
+	size = readl(&regs->EPxSizeLA[ep->num]);
+	size &= DATASIZE;
+
+	/* Undesirable behavior:  FIFO_CLEAR also clears the
+	 * endpoint halt feature.  For OUT, we _could_ just read
+	 * the bytes out (PIO, if !ep->dma); for IN, there's no choice.
+	 */
+	if (size)
+		command(regs, COMMAND_FIFO_CLEAR, ep->num);
+}
+
+static struct usb_ep_ops goku_ep_ops = {
+	.enable		= goku_ep_enable,
+	.disable	= goku_ep_disable,
+
+	.alloc_request	= goku_alloc_request,
+	.free_request	= goku_free_request,
+
+	.alloc_buffer	= goku_alloc_buffer,
+	.free_buffer	= goku_free_buffer,
+
+	.queue		= goku_queue,
+	.dequeue	= goku_dequeue,
+
+	.set_halt	= goku_set_halt,
+	.fifo_status	= goku_fifo_status,
+	.fifo_flush	= goku_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int goku_get_frame(struct usb_gadget *_gadget)
+{
+	return -EOPNOTSUPP;
+}
+
+static const struct usb_gadget_ops goku_ops = {
+	.get_frame	= goku_get_frame,
+	// no remote wakeup
+	// not selfpowered
+};
+
+/*-------------------------------------------------------------------------*/
+
+static inline char *dmastr(void)
+{
+	if (use_dma == 0)
+		return "(dma disabled)";
+	else if (use_dma == 2)
+		return "(dma IN and OUT)";
+	else
+		return "(dma IN)";
+}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static const char proc_node_name [] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void
+dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
+{
+	int t;
+
+	/* int_status is the same format ... */
+	t = scnprintf(*next, *size,
+		"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
+		label, mask,
+		(mask & INT_PWRDETECT) ? " power" : "",
+		(mask & INT_SYSERROR) ? " sys" : "",
+		(mask & INT_MSTRDEND) ? " in-dma" : "",
+		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
+
+		(mask & INT_MSTWREND) ? " out-dma" : "",
+		(mask & INT_MSTWRSET) ? " wrset" : "",
+		(mask & INT_ERR) ? " err" : "",
+		(mask & INT_SOF) ? " sof" : "",
+
+		(mask & INT_EP3NAK) ? " ep3nak" : "",
+		(mask & INT_EP2NAK) ? " ep2nak" : "",
+		(mask & INT_EP1NAK) ? " ep1nak" : "",
+		(mask & INT_EP3DATASET) ? " ep3" : "",
+
+		(mask & INT_EP2DATASET) ? " ep2" : "",
+		(mask & INT_EP1DATASET) ? " ep1" : "",
+		(mask & INT_STATUSNAK) ? " ep0snak" : "",
+		(mask & INT_STATUS) ? " ep0status" : "",
+
+		(mask & INT_SETUP) ? " setup" : "",
+		(mask & INT_ENDPOINT0) ? " ep0" : "",
+		(mask & INT_USBRESET) ? " reset" : "",
+		(mask & INT_SUSPEND) ? " suspend" : "");
+	*size -= t;
+	*next += t;
+}
+
+
+static int
+udc_proc_read(char *buffer, char **start, off_t off, int count,
+		int *eof, void *_dev)
+{
+	char				*buf = buffer;
+	struct goku_udc			*dev = _dev;
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	char				*next = buf;
+	unsigned			size = count;
+	unsigned long			flags;
+	int				i, t, is_usb_connected;
+	u32				tmp;
+
+	if (off != 0)
+		return 0;
+
+	local_irq_save(flags);
+
+	/* basic device status */
+	tmp = readl(&regs->power_detect);
+	is_usb_connected = tmp & PW_DETECT;
+	t = scnprintf(next, size,
+		"%s - %s\n"
+		"%s version: %s %s\n"
+		"Gadget driver: %s\n"
+		"Host %s, %s\n"
+		"\n",
+		pci_name(dev->pdev), driver_desc,
+		driver_name, DRIVER_VERSION, dmastr(),
+		dev->driver ? dev->driver->driver.name : "(none)",
+		is_usb_connected
+			? ((tmp & PW_PULLUP) ? "full speed" : "powered")
+			: "disconnected",
+		({char *tmp;
+		switch(dev->ep0state){
+		case EP0_DISCONNECT:	tmp = "ep0_disconnect"; break;
+		case EP0_IDLE:		tmp = "ep0_idle"; break;
+		case EP0_IN:		tmp = "ep0_in"; break;
+		case EP0_OUT:		tmp = "ep0_out"; break;
+		case EP0_STATUS:	tmp = "ep0_status"; break;
+		case EP0_STALL:		tmp = "ep0_stall"; break;
+		case EP0_SUSPEND:	tmp = "ep0_suspend"; break;
+		default:		tmp = "ep0_?"; break;
+		} tmp; })
+		);
+	size -= t;
+	next += t;
+
+	dump_intmask("int_status", readl(&regs->int_status), &next, &size);
+	dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);
+
+	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
+		goto done;
+
+	/* registers for (active) device and ep0 */
+	t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
+			"single.bcs %02x.%02x state %x addr %u\n",
+			dev->irqs, readl(&regs->DataSet),
+			readl(&regs->EPxSingle), readl(&regs->EPxBCS),
+			readl(&regs->UsbState),
+			readl(&regs->address));
+	size -= t;
+	next += t;
+
+	tmp = readl(&regs->dma_master);
+	t = scnprintf(next, size,
+		"dma %03X =" EIGHTBITS "%s %s\n", tmp,
+		(tmp & MST_EOPB_DIS) ? " eopb-" : "",
+		(tmp & MST_EOPB_ENA) ? " eopb+" : "",
+		(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
+		(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
+
+		(tmp & MST_RD_EOPB) ? " eopb" : "",
+		(tmp & MST_RD_RESET) ? " in_reset" : "",
+		(tmp & MST_WR_RESET) ? " out_reset" : "",
+		(tmp & MST_RD_ENA) ? " IN" : "",
+
+		(tmp & MST_WR_ENA) ? " OUT" : "",
+		(tmp & MST_CONNECTION)
+			? "ep1in/ep2out"
+			: "ep1out/ep2in");
+	size -= t;
+	next += t;
+
+	/* dump endpoint queues */
+	for (i = 0; i < 4; i++) {
+		struct goku_ep		*ep = &dev->ep [i];
+		struct goku_request	*req;
+		int			t;
+
+		if (i && !ep->desc)
+			continue;
+
+		tmp = readl(ep->reg_status);
+		t = scnprintf(next, size,
+			"%s %s max %u %s, irqs %lu, "
+			"status %02x (%s) " FOURBITS "\n",
+			ep->ep.name,
+			ep->is_in ? "in" : "out",
+			ep->ep.maxpacket,
+			ep->dma ? "dma" : "pio",
+			ep->irqs,
+			tmp, ({ char *s;
+			switch (tmp & EPxSTATUS_EP_MASK) {
+			case EPxSTATUS_EP_READY:
+				s = "ready"; break;
+			case EPxSTATUS_EP_DATAIN:
+				s = "packet"; break;
+			case EPxSTATUS_EP_FULL:
+				s = "full"; break;
+			case EPxSTATUS_EP_TX_ERR:	// host will retry
+				s = "tx_err"; break;
+			case EPxSTATUS_EP_RX_ERR:
+				s = "rx_err"; break;
+			case EPxSTATUS_EP_BUSY:		/* ep0 only */
+				s = "busy"; break;
+			case EPxSTATUS_EP_STALL:
+				s = "stall"; break;
+			case EPxSTATUS_EP_INVALID:	// these "can't happen"
+				s = "invalid"; break;
+			default:
+				s = "?"; break;
+			}; s; }),
+			(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
+			(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
+			(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
+			(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
+			);
+		if (t <= 0 || t > size)
+			goto done;
+		size -= t;
+		next += t;
+
+		if (list_empty(&ep->queue)) {
+			t = scnprintf(next, size, "\t(nothing queued)\n");
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+			continue;
+		}
+		list_for_each_entry(req, &ep->queue, queue) {
+			if (ep->dma && req->queue.prev == &ep->queue) {
+				if (i == UDC_MSTRD_ENDPOINT)
+					tmp = readl(&regs->in_dma_current);
+				else
+					tmp = readl(&regs->out_dma_current);
+				tmp -= req->req.dma;
+				tmp++;
+			} else
+				tmp = req->req.actual;
+
+			t = scnprintf(next, size,
+				"\treq %p len %u/%u buf %p\n",
+				&req->req, tmp, req->req.length,
+				req->req.buf);
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+		}
+	}
+
+done:
+	local_irq_restore(flags);
+	*eof = 1;
+	return count - size;
+}
+
+#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*-------------------------------------------------------------------------*/
+
+static void udc_reinit (struct goku_udc *dev)
+{
+	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
+
+	unsigned i;
+
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	dev->gadget.ep0 = &dev->ep [0].ep;
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	dev->ep0state = EP0_DISCONNECT;
+	dev->irqs = 0;
+
+	for (i = 0; i < 4; i++) {
+		struct goku_ep	*ep = &dev->ep[i];
+
+		ep->num = i;
+		ep->ep.name = names[i];
+		ep->reg_fifo = &dev->regs->ep_fifo [i];
+		ep->reg_status = &dev->regs->ep_status [i];
+		ep->reg_mode = &dev->regs->ep_mode[i];
+
+		ep->ep.ops = &goku_ep_ops;
+		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+		ep->dev = dev;
+		INIT_LIST_HEAD (&ep->queue);
+
+		ep_reset(NULL, ep);
+	}
+
+	dev->ep[0].reg_mode = NULL;
+	dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
+	list_del_init (&dev->ep[0].ep.ep_list);
+}
+
+static void udc_reset(struct goku_udc *dev)
+{
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+
+	writel(0, &regs->power_detect);
+	writel(0, &regs->int_enable);
+	readl(&regs->int_enable);
+	dev->int_enable = 0;
+
+	/* deassert reset, leave USB D+ at hi-Z (no pullup)
+	 * don't let INT_PWRDETECT sequence begin
+	 */
+	udelay(250);
+	writel(PW_RESETB, &regs->power_detect);
+	readl(&regs->int_enable);
+}
+
+static void ep0_start(struct goku_udc *dev)
+{
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	unsigned			i;
+
+	VDBG(dev, "%s\n", __FUNCTION__);
+
+	udc_reset(dev);
+	udc_reinit (dev);
+	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
+
+	/* hw handles set_address, set_feature, get_status; maybe more */
+	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
+		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
+		| G_REQMODE_GET_DESC
+		| G_REQMODE_CLEAR_FEAT
+		, &regs->reqmode);
+
+	for (i = 0; i < 4; i++)
+		dev->ep[i].irqs = 0;
+
+	/* can't modify descriptors after writing UsbReady */
+	for (i = 0; i < DESC_LEN; i++)
+		writel(0, &regs->descriptors[i]);
+	writel(0, &regs->UsbReady);
+
+	/* expect ep0 requests when the host drops reset */
+	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
+	dev->int_enable = INT_DEVWIDE | INT_EP0;
+	writel(dev->int_enable, &dev->regs->int_enable);
+	readl(&regs->int_enable);
+	dev->gadget.speed = USB_SPEED_FULL;
+	dev->ep0state = EP0_IDLE;
+}
+
+static void udc_enable(struct goku_udc *dev)
+{
+	/* start enumeration now, or after power detect irq */
+	if (readl(&dev->regs->power_detect) & PW_DETECT)
+		ep0_start(dev);
+	else {
+		DBG(dev, "%s\n", __FUNCTION__);
+		dev->int_enable = INT_PWRDETECT;
+		writel(dev->int_enable, &dev->regs->int_enable);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* keeping it simple:
+ * - one bus driver, initted first;
+ * - one function driver, initted second
+ */
+
+static struct goku_udc	*the_controller;
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct goku_udc	*dev = the_controller;
+	int			retval;
+
+	if (!driver
+			|| driver->speed != USB_SPEED_FULL
+			|| !driver->bind
+			|| !driver->unbind
+			|| !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	/* hook up the driver */
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+	retval = driver->bind(&dev->gadget);
+	if (retval) {
+		DBG(dev, "bind to driver %s --> error %d\n",
+				driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	/* then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	udc_enable(dev);
+
+	DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
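+
+/* For illustration only (not from this driver): a function driver provides
+ * all the hooks checked above and typically registers from its module init
+ * (all names below are hypothetical):
+ *
+ *	static struct usb_gadget_driver my_function_driver = {
+ *		.speed		= USB_SPEED_FULL,
+ *		.bind		= my_bind,
+ *		.unbind		= my_unbind,
+ *		.setup		= my_setup,
+ *		.disconnect	= my_disconnect,
+ *		.driver		= { .name = "my_function", },
+ *	};
+ *
+ *	status = usb_gadget_register_driver(&my_function_driver);
+ */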
+
+static void
+stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
+{
+	unsigned	i;
+
+	DBG (dev, "%s\n", __FUNCTION__);
+
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* disconnect gadget driver after quiescing hw and the driver */
+	udc_reset (dev);
+	for (i = 0; i < 4; i++)
+		nuke(&dev->ep [i], -ESHUTDOWN);
+	if (driver) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
+	if (dev->driver)
+		udc_enable(dev);
+}
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct goku_udc	*dev = the_controller;
+	unsigned long	flags;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->driver = NULL;
+	stop_activity(dev, driver);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	driver->unbind(&dev->gadget);
+
+	DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/*-------------------------------------------------------------------------*/
+
+static void ep0_setup(struct goku_udc *dev)
+{
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	struct usb_ctrlrequest		ctrl;
+	int				tmp;
+
+	/* read SETUP packet and enter DATA stage */
+	ctrl.bRequestType = readl(&regs->bRequestType);
+	ctrl.bRequest = readl(&regs->bRequest);
+	ctrl.wValue  = (readl(&regs->wValueH)  << 8) | readl(&regs->wValueL);
+	ctrl.wIndex  = (readl(&regs->wIndexH)  << 8) | readl(&regs->wIndexL);
+	ctrl.wLength = (readl(&regs->wLengthH) << 8) | readl(&regs->wLengthL);
+	writel(0, &regs->SetupRecv);
+
+	nuke(&dev->ep[0], 0);
+	dev->ep[0].stopped = 0;
+	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
+		dev->ep[0].is_in = 1;
+		dev->ep0state = EP0_IN;
+		/* detect early status stages */
+		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
+	} else {
+		dev->ep[0].is_in = 0;
+		dev->ep0state = EP0_OUT;
+
+		/* NOTE:  CLEAR_FEATURE is done in software so that we can
+		 * synchronize transfer restarts after bulk IN stalls.  data
+		 * won't even enter the fifo until the halt is cleared.
+		 */
+		switch (ctrl.bRequest) {
+		case USB_REQ_CLEAR_FEATURE:
+			switch (ctrl.bRequestType) {
+			case USB_RECIP_ENDPOINT:
+				tmp = ctrl.wIndex & 0x0f;
+				/* active endpoint */
+				if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
+					goto stall;
+				if (ctrl.wIndex & USB_DIR_IN) {
+					if (!dev->ep[tmp].is_in)
+						goto stall;
+				} else {
+					if (dev->ep[tmp].is_in)
+						goto stall;
+				}
+				if (ctrl.wValue != USB_ENDPOINT_HALT)
+					goto stall;
+				if (tmp)
+					goku_clear_halt(&dev->ep[tmp]);
+succeed:
+				/* start ep0out status stage */
+				writel(~(1<<0), &regs->EOP);
+				dev->ep[0].stopped = 1;
+				dev->ep0state = EP0_STATUS;
+				return;
+			case USB_RECIP_DEVICE:
+				/* device remote wakeup: always clear */
+				if (ctrl.wValue != 1)
+					goto stall;
+				VDBG(dev, "clear dev remote wakeup\n");
+				goto succeed;
+			case USB_RECIP_INTERFACE:
+				goto stall;
+			default:		/* pass to gadget driver */
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+#ifdef USB_TRACE
+	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+		ctrl.bRequestType, ctrl.bRequest,
+		ctrl.wValue, ctrl.wIndex, ctrl.wLength);
+#endif
+
+	/* hw wants to know when we're configured (or not) */
+	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
+				&& ctrl.bRequestType == USB_RECIP_DEVICE);
+	if (unlikely(dev->req_config))
+		dev->configured = (ctrl.wValue != 0);
+
+	/* delegate everything to the gadget driver.
+	 * it may respond after this irq handler returns.
+	 */
+	spin_unlock (&dev->lock);
+	tmp = dev->driver->setup(&dev->gadget, &ctrl);
+	spin_lock (&dev->lock);
+	if (unlikely(tmp < 0)) {
+stall:
+#ifdef USB_TRACE
+		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
+				ctrl.bRequestType, ctrl.bRequest, tmp);
+#endif
+		command(regs, COMMAND_STALL, 0);
+		dev->ep[0].stopped = 1;
+		dev->ep0state = EP0_STALL;
+	}
+
+	/* expect at least one data or status stage irq */
+}
+
+#define ACK(irqbit) { \
+		stat &= ~irqbit; \
+		writel(~irqbit, &regs->int_status); \
+		handled = 1; \
+		}
+
+static irqreturn_t goku_irq(int irq, void *_dev, struct pt_regs *r)
+{
+	struct goku_udc			*dev = _dev;
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	struct goku_ep			*ep;
+	u32				stat, handled = 0;
+	unsigned			i, rescans = 5;
+
+	spin_lock(&dev->lock);
+
+rescan:
+	stat = readl(&regs->int_status) & dev->int_enable;
+	if (!stat)
+		goto done;
+	dev->irqs++;
+
+	/* device-wide irqs */
+	if (unlikely(stat & INT_DEVWIDE)) {
+		if (stat & INT_SYSERROR) {
+			ERROR(dev, "system error\n");
+			stop_activity(dev, dev->driver);
+			stat = 0;
+			handled = 1;
+			// FIXME have a neater way to prevent re-enumeration
+			dev->driver = NULL;
+			goto done;
+		}
+		if (stat & INT_PWRDETECT) {
+			writel(~stat, &regs->int_status);
+			if (readl(&dev->regs->power_detect) & PW_DETECT) {
+				VDBG(dev, "connect\n");
+				ep0_start(dev);
+			} else {
+				DBG(dev, "disconnect\n");
+				if (dev->gadget.speed == USB_SPEED_FULL)
+					stop_activity(dev, dev->driver);
+				dev->ep0state = EP0_DISCONNECT;
+				dev->int_enable = INT_DEVWIDE;
+				writel(dev->int_enable, &dev->regs->int_enable);
+			}
+			stat = 0;
+			handled = 1;
+			goto done;
+		}
+		if (stat & INT_SUSPEND) {
+			ACK(INT_SUSPEND);
+			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
+				switch (dev->ep0state) {
+				case EP0_DISCONNECT:
+				case EP0_SUSPEND:
+					goto pm_next;
+				default:
+					break;
+				}
+				DBG(dev, "USB suspend\n");
+				dev->ep0state = EP0_SUSPEND;
+				if (dev->gadget.speed != USB_SPEED_UNKNOWN
+						&& dev->driver
+						&& dev->driver->suspend) {
+					spin_unlock(&dev->lock);
+					dev->driver->suspend(&dev->gadget);
+					spin_lock(&dev->lock);
+				}
+			} else {
+				if (dev->ep0state != EP0_SUSPEND) {
+					DBG(dev, "bogus USB resume %d\n",
+						dev->ep0state);
+					goto pm_next;
+				}
+				DBG(dev, "USB resume\n");
+				dev->ep0state = EP0_IDLE;
+				if (dev->gadget.speed != USB_SPEED_UNKNOWN
+						&& dev->driver
+						&& dev->driver->resume) {
+					spin_unlock(&dev->lock);
+					dev->driver->resume(&dev->gadget);
+					spin_lock(&dev->lock);
+				}
+			}
+		}
+pm_next:
+		if (stat & INT_USBRESET) {		/* hub reset done */
+			ACK(INT_USBRESET);
+			INFO(dev, "USB reset done, gadget %s\n",
+				dev->driver->driver.name);
+		}
+		// and INT_ERR on some endpoint's crc/bitstuff/... problem
+	}
+
+	/* progress ep0 setup, data, or status stages.
+	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
+	 */
+	if (stat & INT_SETUP) {
+		ACK(INT_SETUP);
+		dev->ep[0].irqs++;
+		ep0_setup(dev);
+	}
+	if (stat & INT_STATUSNAK) {
+		ACK(INT_STATUSNAK|INT_ENDPOINT0);
+		if (dev->ep0state == EP0_IN) {
+			ep = &dev->ep[0];
+			ep->irqs++;
+			nuke(ep, 0);
+			writel(~(1<<0), &regs->EOP);
+			dev->ep0state = EP0_STATUS;
+		}
+	}
+	if (stat & INT_ENDPOINT0) {
+		ACK(INT_ENDPOINT0);
+		ep = &dev->ep[0];
+		ep->irqs++;
+		pio_advance(ep);
+	}
+
+	/* dma completion */
+	if (stat & INT_MSTRDEND) {	/* IN */
+		ACK(INT_MSTRDEND);
+		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
+		ep->irqs++;
+		dma_advance(dev, ep);
+	}
+	if (stat & INT_MSTWREND) {	/* OUT */
+		ACK(INT_MSTWREND);
+		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
+		ep->irqs++;
+		dma_advance(dev, ep);
+	}
+	if (stat & INT_MSTWRTMOUT) {	/* OUT */
+		ACK(INT_MSTWRTMOUT);
+		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
+		ep->irqs++;
+		ERROR(dev, "%s write timeout ?\n", ep->ep.name);
+		// reset dma? then dma_advance()
+	}
+
+	/* pio */
+	for (i = 1; i < 4; i++) {
+		u32		tmp = INT_EPxDATASET(i);
+
+		if (!(stat & tmp))
+			continue;
+		ep = &dev->ep[i];
+		pio_advance(ep);
+		if (list_empty (&ep->queue))
+			pio_irq_disable(dev, regs, i);
+		stat &= ~tmp;
+		handled = 1;
+		ep->irqs++;
+	}
+
+	if (rescans--)
+		goto rescan;
+
+done:
+	(void)readl(&regs->int_enable);
+	spin_unlock(&dev->lock);
+	if (stat)
+		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
+				readl(&regs->int_status), dev->int_enable);
+	return IRQ_RETVAL(handled);
+}
+
+#undef ACK
+
+/*-------------------------------------------------------------------------*/
+
+static void gadget_release(struct device *_dev)
+{
+	struct goku_udc	*dev = dev_get_drvdata(_dev);
+
+	kfree(dev);
+}
+
+/* tear down the binding between this driver and the pci device */
+
+static void goku_remove(struct pci_dev *pdev)
+{
+	struct goku_udc		*dev = pci_get_drvdata(pdev);
+
+	DBG(dev, "%s\n", __FUNCTION__);
+	/* start with the driver above us */
+	if (dev->driver) {
+		/* should have been done already by driver model core */
+		WARN(dev, "pci remove, driver '%s' is still registered\n",
+				dev->driver->driver.name);
+		usb_gadget_unregister_driver(dev->driver);
+	}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	remove_proc_entry(proc_node_name, NULL);
+#endif
+	if (dev->regs)
+		udc_reset(dev);
+	if (dev->got_irq)
+		free_irq(pdev->irq, dev);
+	if (dev->regs)
+		iounmap(dev->regs);
+	if (dev->got_region)
+		release_mem_region(pci_resource_start (pdev, 0),
+				pci_resource_len (pdev, 0));
+	if (dev->enabled)
+		pci_disable_device(pdev);
+	device_unregister(&dev->gadget.dev);
+
+	pci_set_drvdata(pdev, NULL);
+	dev->regs = NULL;
+	the_controller = NULL;
+
+	INFO(dev, "unbind\n");
+}
+
+/* wrap this driver around the specified pci device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+
+static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct goku_udc		*dev = NULL;
+	unsigned long		resource, len;
+	void __iomem		*base = NULL;
+	int			retval;
+	char			buf [8], *bufp;
+
+	/* if you want to support more than one controller in a system,
+	 * usb_gadget_{register,unregister}_driver() must change.
+	 */
+	if (the_controller) {
+		WARN(dev, "ignoring %s\n", pci_name(pdev));
+		return -EBUSY;
+	}
+	if (!pdev->irq) {
+		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
+		retval = -ENODEV;
+		goto done;
+	}
+
+	/* alloc, and start init */
+	dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+	if (dev == NULL){
+		pr_debug("enomem %s\n", pci_name(pdev));
+		retval = -ENOMEM;
+		goto done;
+	}
+
+	memset(dev, 0, sizeof *dev);
+	spin_lock_init(&dev->lock);
+	dev->pdev = pdev;
+	dev->gadget.ops = &goku_ops;
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	strcpy(dev->gadget.dev.bus_id, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = driver_name;
+
+	/* now all the pci goodies ... */
+	retval = pci_enable_device(pdev);
+	if (retval < 0) {
+		DBG(dev, "can't enable, %d\n", retval);
+		goto done;
+	}
+	dev->enabled = 1;
+
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		DBG(dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto done;
+	}
+	dev->got_region = 1;
+
+	base = ioremap_nocache(resource, len);
+	if (base == NULL) {
+		DBG(dev, "can't map memory\n");
+		retval = -EFAULT;
+		goto done;
+	}
+	dev->regs = (struct goku_udc_regs __iomem *) base;
+
+	pci_set_drvdata(pdev, dev);
+	INFO(dev, "%s\n", driver_desc);
+	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
+#ifndef __sparc__
+	scnprintf(buf, sizeof buf, "%d", pdev->irq);
+	bufp = buf;
+#else
+	bufp = __irq_itoa(pdev->irq);
+#endif
+	INFO(dev, "irq %s, pci mem %p\n", bufp, base);
+
+	/* init to known state, then setup irqs */
+	udc_reset(dev);
+	udc_reinit (dev);
+	if (request_irq(pdev->irq, goku_irq, SA_SHIRQ/*|SA_SAMPLE_RANDOM*/,
+			driver_name, dev) != 0) {
+		DBG(dev, "request interrupt %s failed\n", bufp);
+		retval = -EBUSY;
+		goto done;
+	}
+	dev->got_irq = 1;
+	if (use_dma)
+		pci_set_master(pdev);
+
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
+#endif
+
+	/* done */
+	the_controller = dev;
+	device_register(&dev->gadget.dev);
+
+	return 0;
+
+done:
+	if (dev)
+		goku_remove (pdev);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static struct pci_device_id pci_ids [] = { {
+	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask = 	~0,
+	.vendor =	0x102f,		/* Toshiba */
+	.device =	0x0107,		/* this UDC */
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+
+}, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE (pci, pci_ids);
+
+static struct pci_driver goku_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	goku_probe,
+	.remove =	goku_remove,
+
+	/* FIXME add power management support */
+};
+
+static int __init init (void)
+{
+	return pci_register_driver (&goku_pci_driver);
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	pci_unregister_driver (&goku_pci_driver);
+}
+module_exit (cleanup);
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
new file mode 100644
index 0000000..ea8c8e5
--- /dev/null
+++ b/drivers/usb/gadget/goku_udc.h
@@ -0,0 +1,290 @@
+/*
+ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
+ *
+ * Copyright (C) 2000-2002 Lineo
+ *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
+ * Copyright (C) 2002 Toshiba Corporation
+ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * PCI BAR 0 points to these registers.
+ */
+struct goku_udc_regs {
+	/* irq management */
+	u32	int_status;		/* 0x000 */
+	u32	int_enable;
+#define INT_SUSPEND		0x00001		/* or resume */
+#define INT_USBRESET		0x00002
+#define INT_ENDPOINT0		0x00004
+#define INT_SETUP		0x00008
+#define INT_STATUS		0x00010
+#define INT_STATUSNAK		0x00020
+#define INT_EPxDATASET(n)	(0x00020 << (n))	/* 0 < n < 4 */
+#	define INT_EP1DATASET		0x00040
+#	define INT_EP2DATASET		0x00080
+#	define INT_EP3DATASET		0x00100
+#define INT_EPnNAK(n)		(0x00100 << (n))	/* 0 < n < 4 */
+#	define INT_EP1NAK		0x00200
+#	define INT_EP2NAK		0x00400
+#	define INT_EP3NAK		0x00800
+#define INT_SOF			0x01000
+#define INT_ERR			0x02000
+#define INT_MSTWRSET		0x04000
+#define INT_MSTWREND		0x08000
+#define INT_MSTWRTMOUT		0x10000
+#define INT_MSTRDEND		0x20000
+#define INT_SYSERROR		0x40000
+#define INT_PWRDETECT		0x80000
+
+#define	INT_DEVWIDE		(INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
+#define	INT_EP0 		(INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
+
+	u32	dma_master;
+#define MST_EOPB_DIS		0x0800
+#define MST_EOPB_ENA		0x0400
+#define MST_TIMEOUT_DIS		0x0200
+#define MST_TIMEOUT_ENA		0x0100
+#define MST_RD_EOPB		0x0080		/* write-only */
+#define MST_RD_RESET		0x0040
+#define MST_WR_RESET		0x0020
+#define MST_RD_ENA		0x0004		/* 1:start, 0:ignore */
+#define MST_WR_ENA		0x0002		/* 1:start, 0:ignore */
+#define MST_CONNECTION		0x0001		/* 0 for ep1out/ep2in */
+
+#define MST_R_BITS		(MST_EOPB_DIS|MST_EOPB_ENA \
+					|MST_RD_ENA|MST_RD_RESET)
+#define MST_W_BITS		(MST_TIMEOUT_DIS|MST_TIMEOUT_ENA \
+					|MST_WR_ENA|MST_WR_RESET)
+#define MST_RW_BITS		(MST_R_BITS|MST_W_BITS \
+					|MST_CONNECTION)
+
+/* these values assume (dma_master & MST_CONNECTION) == 0 */
+#define UDC_MSTWR_ENDPOINT        1
+#define UDC_MSTRD_ENDPOINT        2
+
+	/* dma master write */
+	u32	out_dma_start;
+	u32	out_dma_end;
+	u32	out_dma_current;
+
+	/* dma master read */
+	u32	in_dma_start;
+	u32	in_dma_end;
+	u32	in_dma_current;
+
+	u32	power_detect;
+#define PW_DETECT		0x04
+#define PW_RESETB		0x02
+#define PW_PULLUP		0x01
+
+	u8	_reserved0 [0x1d8];
+
+	/* endpoint registers */
+	u32	ep_fifo [4];		/* 0x200 */
+	u8	_reserved1 [0x10];
+	u32	ep_mode [4];		/* only 1-3 valid */
+	u8	_reserved2 [0x10];
+
+	u32	ep_status [4];
+#define EPxSTATUS_TOGGLE	0x40
+#define EPxSTATUS_SUSPEND	0x20
+#define EPxSTATUS_EP_MASK	(0x07<<2)
+#	define EPxSTATUS_EP_READY	(0<<2)
+#	define EPxSTATUS_EP_DATAIN	(1<<2)
+#	define EPxSTATUS_EP_FULL	(2<<2)
+#	define EPxSTATUS_EP_TX_ERR	(3<<2)
+#	define EPxSTATUS_EP_RX_ERR	(4<<2)
+#	define EPxSTATUS_EP_BUSY	(5<<2)
+#	define EPxSTATUS_EP_STALL	(6<<2)
+#	define EPxSTATUS_EP_INVALID	(7<<2)
+#define EPxSTATUS_FIFO_DISABLE	0x02
+#define EPxSTATUS_STAGE_ERROR	0x01
+
+	u8	_reserved3 [0x10];
+	u32	EPxSizeLA[4];
+#define PACKET_ACTIVE		(1<<7)
+#define DATASIZE		0x7f
+	u8	_reserved3a [0x10];
+	u32	EPxSizeLB[4];		/* only 1,2 valid */
+	u8	_reserved3b [0x10];
+	u32	EPxSizeHA[4];		/* only 1-3 valid */
+	u8	_reserved3c [0x10];
+	u32	EPxSizeHB[4];		/* only 1,2 valid */
+	u8	_reserved4[0x30];
+
+	/* SETUP packet contents */
+	u32	bRequestType;		/* 0x300 */
+	u32	bRequest;
+	u32	wValueL;
+	u32	wValueH;
+	u32	wIndexL;
+	u32	wIndexH;
+	u32	wLengthL;
+	u32	wLengthH;
+
+	/* command interaction/handshaking */
+	u32	SetupRecv;		/* 0x320 */
+	u32	CurrConfig;
+	u32	StdRequest;
+	u32	Request;
+	u32	DataSet;
+#define DATASET_A(epnum)	(1<<(2*(epnum)))
+#define DATASET_B(epnum)	(2<<(2*(epnum)))
+#define DATASET_AB(epnum)	(3<<(2*(epnum)))
+	u8	_reserved5[4];
+
+	u32	UsbState;
+#define USBSTATE_CONFIGURED	0x04
+#define USBSTATE_ADDRESSED	0x02
+#define USBSTATE_DEFAULT	0x01
+
+	u32	EOP;
+
+	u32	Command;		/* 0x340 */
+#define COMMAND_SETDATA0	2
+#define COMMAND_RESET		3
+#define COMMAND_STALL		4
+#define COMMAND_INVALID		5
+#define COMMAND_FIFO_DISABLE	7
+#define COMMAND_FIFO_ENABLE	8
+#define COMMAND_INIT_DESCRIPTOR	9
+#define COMMAND_FIFO_CLEAR	10	/* also stall */
+#define COMMAND_STALL_CLEAR	11
+#define COMMAND_EP(n)		((n) << 4)
+
+	u32	EPxSingle;
+	u8	_reserved6[4];
+	u32	EPxBCS;
+	u8	_reserved7[8];
+	u32	IntControl;
+#define ICONTROL_STATUSNAK	1
+	u8	_reserved8[4];
+
+	u32	reqmode;	// 0x360 standard request mode, low 8 bits
+#define G_REQMODE_SET_INTF	(1<<7)
+#define G_REQMODE_GET_INTF	(1<<6)
+#define G_REQMODE_SET_CONF	(1<<5)
+#define G_REQMODE_GET_CONF	(1<<4)
+#define G_REQMODE_GET_DESC	(1<<3)
+#define G_REQMODE_SET_FEAT	(1<<2)
+#define G_REQMODE_CLEAR_FEAT	(1<<1)
+#define G_REQMODE_GET_STATUS	(1<<0)
+
+	u32	ReqMode;
+	u8	_reserved9[0x18];
+	u32	PortStatus;		/* 0x380 */
+	u8	_reserved10[8];
+	u32	address;
+	u32	buff_test;
+	u8	_reserved11[4];
+	u32	UsbReady;
+	u8	_reserved12[4];
+	u32	SetDescStall;		/* 0x3a0 */
+	u8	_reserved13[0x45c];
+
+	/* hardware could handle limited GET_DESCRIPTOR duties */
+#define	DESC_LEN	0x80
+	u32	descriptors[DESC_LEN];	/* 0x800 */
+	u8	_reserved14[0x600];
+
+} __attribute__ ((packed));
+
+#define	MAX_FIFO_SIZE	64
+#define	MAX_EP0_SIZE	8		/* ep0 fifo is bigger, though */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+
+struct goku_ep {
+	struct usb_ep				ep;
+	struct goku_udc				*dev;
+	unsigned long				irqs;
+
+	unsigned				num:8,
+						dma:1,
+						is_in:1,
+						stopped:1;
+
+	/* analogous to a host-side qh */
+	struct list_head			queue;
+	const struct usb_endpoint_descriptor	*desc;
+
+	u32 __iomem				*reg_fifo;
+	u32 __iomem				*reg_mode;
+	u32 __iomem				*reg_status;
+};
+
+struct goku_request {
+	struct usb_request		req;
+	struct list_head		queue;
+
+	unsigned			mapped:1;
+};
+
+enum ep0state {
+	EP0_DISCONNECT,		/* no host */
+	EP0_IDLE,		/* between STATUS ack and SETUP report */
+	EP0_IN, EP0_OUT, 	/* data stage */
+	EP0_STATUS,		/* status stage */
+	EP0_STALL,		/* data or status stages */
+	EP0_SUSPEND,		/* usb suspend */
+};
+
+struct goku_udc {
+	/* each pci device provides one gadget, several endpoints */
+	struct usb_gadget		gadget;
+	spinlock_t			lock;
+	struct goku_ep			ep[4];
+	struct usb_gadget_driver 	*driver;
+
+	enum ep0state			ep0state;
+	unsigned			got_irq:1,
+					got_region:1,
+					req_config:1,
+					configured:1,
+					enabled:1;
+
+	/* pci state used to access those endpoints */
+	struct pci_dev			*pdev;
+	struct goku_udc_regs __iomem	*regs;
+	u32				int_enable;
+
+	/* statistics... */
+	unsigned long			irqs;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(dev,level,fmt,args...) \
+	printk(level "%s %s: " fmt , driver_name , \
+			pci_name(dev->pdev) , ## args)
+
+#ifdef DEBUG
+#define DBG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(dev,fmt,args...) \
+	do { } while (0)
+#endif	/* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARN(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
new file mode 100644
index 0000000..2cff67c
--- /dev/null
+++ b/drivers/usb/gadget/inode.c
@@ -0,0 +1,2110 @@
+/*
+ * inode.c -- user mode filesystem api for usb gadget controllers
+ *
+ * Copyright (C) 2003-2004 David Brownell
+ * Copyright (C) 2003 Agilent Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+// #define	DEBUG 			/* data to help fault diagnosis */
+// #define	VERBOSE		/* extra debug messages (success too) */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/uts.h>
+#include <linux/wait.h>
+#include <linux/compiler.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+
+#include <linux/usb_gadgetfs.h>
+#include <linux/usb_gadget.h>
+
+
+/*
+ * The gadgetfs API maps each endpoint to a file descriptor so that you
+ * can use standard synchronous read/write calls for I/O.  There's some
+ * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
+ * drivers show how this works in practice.  You can also use AIO to
+ * eliminate I/O gaps between requests, to help when streaming data.
+ *
+ * Key parts that must be USB-specific are protocols defining how the
+ * read/write operations relate to the hardware state machines.  There
+ * are two types of files.  One type is for the device, implementing ep0.
+ * The other type is for each IN or OUT endpoint.  In both cases, the
+ * user mode driver must configure the hardware before using it.
+ *
+ * - First, dev_config() is called when /dev/gadget/$CHIP is configured
+ *   (by writing configuration and device descriptors).  Afterwards it
+ *   may serve as a source of device events, used to handle all control
+ *   requests other than basic enumeration.
+ *
+ * - Then either immediately, or after a SET_CONFIGURATION control request,
+ *   ep_config() is called when each /dev/gadget/ep* file is configured
+ *   (by writing endpoint descriptors).  Afterwards these files are used
+ *   to write() IN data or to read() OUT data.  To halt the endpoint, a
+ *   "wrong direction" request is issued (like reading an IN endpoint).
+ *
+ * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
+ * not possible on all hardware.  For example, precise fault handling with
+ * respect to data left in endpoint fifos after aborted operations; or
+ * selective clearing of endpoint halts, to implement SET_INTERFACE.
+ */
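+
+/* A rough sketch of the user mode side (not part of this module); the
+ * controller name and endpoint file name below are only examples:
+ *
+ *	fd = open ("/dev/gadget/net2280", O_RDWR);
+ *	write (fd, descriptors, length);	// dev_config()
+ *	// ... read device events from fd to handle control requests ...
+ *
+ *	ep = open ("/dev/gadget/ep-a", O_RDWR);
+ *	write (ep, ep_descriptors, ep_length);	// ep_config()
+ *	write (ep, buf, len);			// IN transfer
+ *	read (ep, buf, len);			// OUT transfer
+ */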
+
+#define	DRIVER_DESC	"USB Gadget filesystem"
+#define	DRIVER_VERSION	"24 Aug 2004"
+
+static const char driver_desc [] = DRIVER_DESC;
+static const char shortname [] = "gadgetfs";
+
+MODULE_DESCRIPTION (DRIVER_DESC);
+MODULE_AUTHOR ("David Brownell");
+MODULE_LICENSE ("GPL");
+
+
+/*----------------------------------------------------------------------*/
+
+#define GADGETFS_MAGIC		0xaee71ee7
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+/* /dev/gadget/$CHIP represents ep0 and the whole device */
+enum ep0_state {
+	/* DISABLED is the initial state.
+	 */
+	STATE_DEV_DISABLED = 0,
+
+	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
+	 * ep0/device i/o modes and binding to the controller.  Driver
+	 * must always write descriptors to initialize the device, then
+	 * the device becomes UNCONNECTED until enumeration.
+	 */
+	STATE_OPENED,
+
+	/* From then on, ep0 fd is in either of two basic modes:
+	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
+	 * - SETUP: read/write will transfer control data and succeed;
+	 *   or if "wrong direction", performs protocol stall
+	 */
+	STATE_UNCONNECTED,
+	STATE_CONNECTED,
+	STATE_SETUP,
+
+	/* UNBOUND means the driver closed ep0, so the device won't be
+	 * accessible again (DEV_DISABLED) until all fds are closed.
+	 */
+	STATE_DEV_UNBOUND,
+};
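+
+/* Sketch (not part of this module) of how a user mode driver might follow
+ * these states by reading events from the ep0 fd; event names are from
+ * <linux/usb_gadgetfs.h>:
+ *
+ *	struct usb_gadgetfs_event	event;
+ *
+ *	while (read (fd, &event, sizeof event) == sizeof event) {
+ *		switch (event.type) {
+ *		case GADGETFS_CONNECT:		// now STATE_CONNECTED
+ *		case GADGETFS_DISCONNECT:	// back to STATE_UNCONNECTED
+ *			break;
+ *		case GADGETFS_SETUP:		// STATE_SETUP: answer
+ *			break;			// event.u.setup via read()/write()
+ *		default:
+ *			break;
+ *		}
+ *	}
+ */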
+
+/* enough for the whole queue: most events invalidate others */
+#define	N_EVENT			5
+
+struct dev_data {
+	spinlock_t			lock;
+	atomic_t			count;
+	enum ep0_state			state;
+	struct usb_gadgetfs_event	event [N_EVENT];
+	unsigned			ev_next;
+	struct fasync_struct		*fasync;
+	u8				current_config;
+
+	/* drivers reading ep0 MUST handle control requests (SETUP)
+	 * reported that way; else the host will time out.
+	 */
+	unsigned			usermode_setup : 1,
+					setup_in : 1,
+					setup_can_stall : 1,
+					setup_out_ready : 1,
+					setup_out_error : 1,
+					setup_abort : 1;
+
+	/* the rest is basically write-once */
+	struct usb_config_descriptor	*config, *hs_config;
+	struct usb_device_descriptor	*dev;
+	struct usb_request		*req;
+	struct usb_gadget		*gadget;
+	struct list_head		epfiles;
+	void				*buf;
+	wait_queue_head_t		wait;
+	struct super_block		*sb;
+	struct dentry			*dentry;
+
+	/* except this scratch i/o buffer for ep0 */
+	u8				rbuf [256];
+};
+
+static inline void get_dev (struct dev_data *data)
+{
+	atomic_inc (&data->count);
+}
+
+static void put_dev (struct dev_data *data)
+{
+	if (likely (!atomic_dec_and_test (&data->count)))
+		return;
+	/* needs no more cleanup */
+	BUG_ON (waitqueue_active (&data->wait));
+	kfree (data);
+}
+
+static struct dev_data *dev_new (void)
+{
+	struct dev_data		*dev;
+
+	dev = kmalloc (sizeof *dev, GFP_KERNEL);
+	if (!dev)
+		return NULL;
+	memset (dev, 0, sizeof *dev);
+	dev->state = STATE_DEV_DISABLED;
+	atomic_set (&dev->count, 1);
+	spin_lock_init (&dev->lock);
+	INIT_LIST_HEAD (&dev->epfiles);
+	init_waitqueue_head (&dev->wait);
+	return dev;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* other /dev/gadget/$ENDPOINT files represent endpoints */
+enum ep_state {
+	STATE_EP_DISABLED = 0,
+	STATE_EP_READY,
+	STATE_EP_DEFER_ENABLE,
+	STATE_EP_ENABLED,
+	STATE_EP_UNBOUND,
+};
+
+struct ep_data {
+	struct semaphore		lock;
+	enum ep_state			state;
+	atomic_t			count;
+	struct dev_data			*dev;
+	/* must hold dev->lock before accessing ep or req */
+	struct usb_ep			*ep;
+	struct usb_request		*req;
+	ssize_t				status;
+	char				name [16];
+	struct usb_endpoint_descriptor	desc, hs_desc;
+	struct list_head		epfiles;
+	wait_queue_head_t		wait;
+	struct dentry			*dentry;
+	struct inode			*inode;
+};
+
+static inline void get_ep (struct ep_data *data)
+{
+	atomic_inc (&data->count);
+}
+
+static void put_ep (struct ep_data *data)
+{
+	if (likely (!atomic_dec_and_test (&data->count)))
+		return;
+	put_dev (data->dev);
+	/* needs no more cleanup */
+	BUG_ON (!list_empty (&data->epfiles));
+	BUG_ON (waitqueue_active (&data->wait));
+	BUG_ON (down_trylock (&data->lock) != 0);
+	kfree (data);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* most "how to use the hardware" policy choices are in userspace:
+ * mapping endpoint roles (which the driver needs) to the capabilities
+ * which the usb controller has.  most of those capabilities are exposed
+ * implicitly, starting with the driver name and then endpoint names.
+ */
+
+static const char *CHIP;
+
+/*----------------------------------------------------------------------*/
+
+/* NOTE:  don't use dev_printk calls before binding to the gadget
+ * at the end of ep0 configuration, or after unbind.
+ */
+
+/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
+#define xprintk(d,level,fmt,args...) \
+	printk(level "%s: " fmt , shortname , ## args)
+
+#ifdef DEBUG
+#define DBG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDEBUG	DBG
+#else
+#define VDEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARN(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+
+/*----------------------------------------------------------------------*/
+
+/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
+ *
+ * After opening, configure non-control endpoints.  Then use normal
+ * stream read() and write() requests; and maybe ioctl() to get more
+ * precise FIFO status when recovering from cancelation.
+ */
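+
+/* Illustrative userspace sketch (not part of this driver): once an
+ * endpoint file has been configured by writing its descriptors (see
+ * ep_config() below), ordinary read()/write() calls perform one
+ * transfer each.  The endpoint name and buffer size are hypothetical.
+ *
+ *	int	fd = open ("/dev/gadget/ep1out-bulk", O_RDWR);
+ *	char	buf [512];
+ *	ssize_t	n;
+ *
+ *	// ... write (fd, descriptors, length) configures it first ...
+ *	n = read (fd, buf, sizeof buf);		// one bulk OUT transfer
+ *	if (n < 0)
+ *		perror ("ep read");
+ */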
+
+static void epio_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct ep_data	*epdata = ep->driver_data;
+
+	if (!req->context)
+		return;
+	if (req->status)
+		epdata->status = req->status;
+	else
+		epdata->status = req->actual;
+	complete ((struct completion *)req->context);
+}
+
+/* lock the endpoint, returning only once it's enabled (or on error).
+ * callers still need dev->lock to use epdata->ep.
+ */
+static int
+get_ready_ep (unsigned f_flags, struct ep_data *epdata)
+{
+	int	val;
+
+	if (f_flags & O_NONBLOCK) {
+		if (down_trylock (&epdata->lock) != 0)
+			goto nonblock;
+		if (epdata->state != STATE_EP_ENABLED) {
+			up (&epdata->lock);
+nonblock:
+			val = -EAGAIN;
+		} else
+			val = 0;
+		return val;
+	}
+
+	if ((val = down_interruptible (&epdata->lock)) < 0)
+		return val;
+newstate:
+	switch (epdata->state) {
+	case STATE_EP_ENABLED:
+		break;
+	case STATE_EP_DEFER_ENABLE:
+		DBG (epdata->dev, "%s wait for host\n", epdata->name);
+		if ((val = wait_event_interruptible (epdata->wait, 
+				epdata->state != STATE_EP_DEFER_ENABLE
+				|| epdata->dev->state == STATE_DEV_UNBOUND
+				)) < 0)
+			goto fail;
+		goto newstate;
+	// case STATE_EP_DISABLED:		/* "can't happen" */
+	// case STATE_EP_READY:			/* "can't happen" */
+	default:				/* error! */
+		pr_debug ("%s: ep %p not available, state %d\n",
+				shortname, epdata, epdata->state);
+		// FALLTHROUGH
+	case STATE_EP_UNBOUND:			/* clean disconnect */
+		val = -ENODEV;
+fail:
+		up (&epdata->lock);
+	}
+	return val;
+}
+
+static ssize_t
+ep_io (struct ep_data *epdata, void *buf, unsigned len)
+{
+	DECLARE_COMPLETION (done);
+	int value;
+
+	spin_lock_irq (&epdata->dev->lock);
+	if (likely (epdata->ep != NULL)) {
+		struct usb_request	*req = epdata->req;
+
+		req->context = &done;
+		req->complete = epio_complete;
+		req->buf = buf;
+		req->length = len;
+		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
+	} else
+		value = -ENODEV;
+	spin_unlock_irq (&epdata->dev->lock);
+
+	if (likely (value == 0)) {
+		value = wait_event_interruptible (done.wait, done.done);
+		if (value != 0) {
+			spin_lock_irq (&epdata->dev->lock);
+			if (likely (epdata->ep != NULL)) {
+				DBG (epdata->dev, "%s i/o interrupted\n",
+						epdata->name);
+				usb_ep_dequeue (epdata->ep, epdata->req);
+				spin_unlock_irq (&epdata->dev->lock);
+
+				wait_event (done.wait, done.done);
+				if (epdata->status == -ECONNRESET)
+					epdata->status = -EINTR;
+			} else {
+				spin_unlock_irq (&epdata->dev->lock);
+
+				DBG (epdata->dev, "endpoint gone\n");
+				epdata->status = -ENODEV;
+			}
+		}
+		return epdata->status;
+	}
+	return value;
+}
+
+
+/* handle a synchronous OUT bulk/intr/iso transfer */
+static ssize_t
+ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+{
+	struct ep_data		*data = fd->private_data;
+	void			*kbuf;
+	ssize_t			value;
+
+	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
+		return value;
+
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (data->desc.bEndpointAddress & USB_DIR_IN) {
+		if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				== USB_ENDPOINT_XFER_ISOC)
+			return -EINVAL;
+		DBG (data->dev, "%s halt\n", data->name);
+		spin_lock_irq (&data->dev->lock);
+		if (likely (data->ep != NULL))
+			usb_ep_set_halt (data->ep);
+		spin_unlock_irq (&data->dev->lock);
+		up (&data->lock);
+		return -EBADMSG;
+	}
+
+	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
+
+	value = -ENOMEM;
+	kbuf = kmalloc (len, SLAB_KERNEL);
+	if (unlikely (!kbuf))
+		goto free1;
+
+	value = ep_io (data, kbuf, len);
+	VDEBUG (data->dev, "%s read %d OUT, status %d\n",
+		data->name, len, value);
+	if (value >= 0 && copy_to_user (buf, kbuf, value))
+		value = -EFAULT;
+
+free1:
+	up (&data->lock);
+	kfree (kbuf);
+	return value;
+}
+
+/* handle a synchronous IN bulk/intr/iso transfer */
+static ssize_t
+ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct ep_data		*data = fd->private_data;
+	void			*kbuf;
+	ssize_t			value;
+
+	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
+		return value;
+
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (!(data->desc.bEndpointAddress & USB_DIR_IN)) {
+		if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				== USB_ENDPOINT_XFER_ISOC)
+			return -EINVAL;
+		DBG (data->dev, "%s halt\n", data->name);
+		spin_lock_irq (&data->dev->lock);
+		if (likely (data->ep != NULL))
+			usb_ep_set_halt (data->ep);
+		spin_unlock_irq (&data->dev->lock);
+		up (&data->lock);
+		return -EBADMSG;
+	}
+
+	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
+
+	value = -ENOMEM;
+	kbuf = kmalloc (len, SLAB_KERNEL);
+	if (!kbuf)
+		goto free1;
+	if (copy_from_user (kbuf, buf, len)) {
+		value = -EFAULT;
+		goto free1;
+	}
+
+	value = ep_io (data, kbuf, len);
+	VDEBUG (data->dev, "%s write %d IN, status %d\n",
+		data->name, len, value);
+free1:
+	up (&data->lock);
+	kfree (kbuf);
+	return value;
+}
+
+static int
+ep_release (struct inode *inode, struct file *fd)
+{
+	struct ep_data		*data = fd->private_data;
+
+	/* clean up if this can be reopened */
+	if (data->state != STATE_EP_UNBOUND) {
+		data->state = STATE_EP_DISABLED;
+		data->desc.bDescriptorType = 0;
+		data->hs_desc.bDescriptorType = 0;
+	}
+	put_ep (data);
+	return 0;
+}
+
+static int ep_ioctl (struct inode *inode, struct file *fd,
+		unsigned code, unsigned long value)
+{
+	struct ep_data		*data = fd->private_data;
+	int			status;
+
+	if ((status = get_ready_ep (fd->f_flags, data)) < 0)
+		return status;
+
+	spin_lock_irq (&data->dev->lock);
+	if (likely (data->ep != NULL)) {
+		switch (code) {
+		case GADGETFS_FIFO_STATUS:
+			status = usb_ep_fifo_status (data->ep);
+			break;
+		case GADGETFS_FIFO_FLUSH:
+			usb_ep_fifo_flush (data->ep);
+			break;
+		case GADGETFS_CLEAR_HALT:
+			status = usb_ep_clear_halt (data->ep);
+			break;
+		default:
+			status = -ENOTTY;
+		}
+	} else
+		status = -ENODEV;
+	spin_unlock_irq (&data->dev->lock);
+	up (&data->lock);
+	return status;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
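+
+/* Illustrative userspace sketch (not part of this driver), assuming the
+ * libaio wrappers around io_setup()/io_submit(); names and sizes below
+ * are hypothetical.  Each submitted iocb maps onto one usb_request
+ * (see ep_aio_rwtail below):
+ *
+ *	io_context_t	ctx = 0;
+ *	struct iocb	iocb, *list [1] = { &iocb };
+ *	struct io_event	ev;
+ *	char		buf [512];
+ *
+ *	io_setup (4, &ctx);
+ *	io_prep_pread (&iocb, ep_fd, buf, sizeof buf, 0);
+ *	io_submit (ctx, 1, list);
+ *	io_getevents (ctx, 1, 1, &ev, NULL);	// ev.res holds the status
+ */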
+
+struct kiocb_priv {
+	struct usb_request	*req;
+	struct ep_data		*epdata;
+	void			*buf;
+	char __user		*ubuf;
+	unsigned		actual;
+};
+
+static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
+{
+	struct kiocb_priv	*priv = iocb->private;
+	struct ep_data		*epdata;
+	int			value;
+
+	local_irq_disable();
+	epdata = priv->epdata;
+	// spin_lock(&epdata->dev->lock);
+	kiocbSetCancelled(iocb);
+	if (likely(epdata && epdata->ep && priv->req))
+		value = usb_ep_dequeue (epdata->ep, priv->req);
+	else
+		value = -EINVAL;
+	// spin_unlock(&epdata->dev->lock);
+	local_irq_enable();
+
+	aio_put_req(iocb);
+	return value;
+}
+
+static ssize_t ep_aio_read_retry(struct kiocb *iocb)
+{
+	struct kiocb_priv	*priv = iocb->private;
+	ssize_t			status = priv->actual;
+
+	/* we "retry" to get the right mm context for this: */
+	status = copy_to_user(priv->ubuf, priv->buf, priv->actual);
+	if (unlikely(0 != status))
+		status = -EFAULT;
+	else
+		status = priv->actual;
+	kfree(priv->buf);
+	kfree(priv);
+	aio_put_req(iocb);
+	return status;
+}
+
+static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct kiocb		*iocb = req->context;
+	struct kiocb_priv	*priv = iocb->private;
+	struct ep_data		*epdata = priv->epdata;
+
+	/* lock against disconnect (and ideally, cancel) */
+	spin_lock(&epdata->dev->lock);
+	priv->req = NULL;
+	priv->epdata = NULL;
+	if (NULL == iocb->ki_retry
+			|| unlikely(0 == req->actual)
+			|| unlikely(kiocbIsCancelled(iocb))) {
+		kfree(req->buf);
+		kfree(priv);
+		iocb->private = NULL;
+		/* aio_complete() reports bytes-transferred _and_ faults */
+		if (unlikely(kiocbIsCancelled(iocb)))
+			aio_put_req(iocb);
+		else
+			aio_complete(iocb,
+				req->actual ? req->actual : req->status,
+				req->status);
+	} else {
+		/* retry() won't report both; so we hide some faults */
+		if (unlikely(0 != req->status))
+			DBG(epdata->dev, "%s fault %d len %d\n",
+				ep->name, req->status, req->actual);
+
+		priv->buf = req->buf;
+		priv->actual = req->actual;
+		kick_iocb(iocb);
+	}
+	spin_unlock(&epdata->dev->lock);
+
+	usb_ep_free_request(ep, req);
+	put_ep(epdata);
+}
+
+static ssize_t
+ep_aio_rwtail(
+	struct kiocb	*iocb,
+	char		*buf,
+	size_t		len,
+	struct ep_data	*epdata,
+	char __user	*ubuf
+)
+{
+	struct kiocb_priv	*priv;
+	struct usb_request	*req;
+	ssize_t			value;
+
+	priv = kmalloc(sizeof *priv, GFP_KERNEL);
+	if (!priv) {
+		value = -ENOMEM;
+fail:
+		kfree(buf);
+		return value;
+	}
+	iocb->private = priv;
+	priv->ubuf = ubuf;
+
+	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
+	if (unlikely(value < 0)) {
+		kfree(priv);
+		goto fail;
+	}
+
+	iocb->ki_cancel = ep_aio_cancel;
+	get_ep(epdata);
+	priv->epdata = epdata;
+	priv->actual = 0;
+
+	/* each kiocb is coupled to one usb_request, but we can't
+	 * allocate or submit those if the host disconnected.
+	 */
+	spin_lock_irq(&epdata->dev->lock);
+	if (likely(epdata->ep)) {
+		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+		if (likely(req)) {
+			priv->req = req;
+			req->buf = buf;
+			req->length = len;
+			req->complete = ep_aio_complete;
+			req->context = iocb;
+			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+			if (unlikely(0 != value))
+				usb_ep_free_request(epdata->ep, req);
+		} else
+			value = -EAGAIN;
+	} else
+		value = -ENODEV;
+	spin_unlock_irq(&epdata->dev->lock);
+
+	up(&epdata->lock);
+
+	if (unlikely(value)) {
+		kfree(priv);
+		put_ep(epdata);
+	} else
+		value = -EIOCBQUEUED;
+	return value;
+}
+
+static ssize_t
+ep_aio_read(struct kiocb *iocb, char __user *ubuf, size_t len, loff_t o)
+{
+	struct ep_data		*epdata = iocb->ki_filp->private_data;
+	char			*buf;
+
+	if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN))
+		return -EINVAL;
+	buf = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!buf))
+		return -ENOMEM;
+	iocb->ki_retry = ep_aio_read_retry;
+	return ep_aio_rwtail(iocb, buf, len, epdata, ubuf);
+}
+
+static ssize_t
+ep_aio_write(struct kiocb *iocb, const char __user *ubuf, size_t len, loff_t o)
+{
+	struct ep_data		*epdata = iocb->ki_filp->private_data;
+	char			*buf;
+
+	if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN)))
+		return -EINVAL;
+	buf = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!buf))
+		return -ENOMEM;
+	if (unlikely(copy_from_user(buf, ubuf, len) != 0)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+	return ep_aio_rwtail(iocb, buf, len, epdata, NULL);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* used after endpoint configuration */
+static struct file_operations ep_io_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.read =		ep_read,
+	.write =	ep_write,
+	.ioctl =	ep_ioctl,
+	.release =	ep_release,
+
+	.aio_read =	ep_aio_read,
+	.aio_write =	ep_aio_write,
+};
+
+/* ENDPOINT INITIALIZATION
+ *
+ *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
+ *     status = write (fd, descriptors, sizeof descriptors)
+ *
+ * That write establishes the endpoint configuration, configuring
+ * the controller to process bulk, interrupt, or isochronous transfers
+ * at the right maxpacket size, and so on.
+ *
+ * The descriptors are message type 1, identified by a host order u32
+ * at the beginning of what's written.  Descriptor order is: full/low
+ * speed descriptor, then optional high speed descriptor.
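+ *
+ * An illustrative (hypothetical) full speed bulk IN example, written
+ * right after the open() above:
+ *
+ *     struct usb_endpoint_descriptor fs = {
+ *         .bLength          = USB_DT_ENDPOINT_SIZE,
+ *         .bDescriptorType  = USB_DT_ENDPOINT,
+ *         .bEndpointAddress = USB_DIR_IN | 1,
+ *         .bmAttributes     = USB_ENDPOINT_XFER_BULK,
+ *         .wMaxPacketSize   = __cpu_to_le16 (64),
+ *     };
+ *     u32 tag = 1;
+ *     char msg [4 + USB_DT_ENDPOINT_SIZE];
+ *
+ *     memcpy (msg, &tag, 4);
+ *     memcpy (msg + 4, &fs, USB_DT_ENDPOINT_SIZE);
+ *     status = write (fd, msg, sizeof msg);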
+ */
+static ssize_t
+ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct ep_data		*data = fd->private_data;
+	struct usb_ep		*ep;
+	u32			tag;
+	int			value;
+
+	if ((value = down_interruptible (&data->lock)) < 0)
+		return value;
+
+	if (data->state != STATE_EP_READY) {
+		value = -EL2HLT;
+		goto fail;
+	}
+
+	value = len;
+	if (len < USB_DT_ENDPOINT_SIZE + 4)
+		goto fail0;
+
+	/* we might need to change message format someday */
+	if (copy_from_user (&tag, buf, 4)) {
+		goto fail1;
+	}
+	if (tag != 1) {
+		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
+		goto fail0;
+	}
+	buf += 4;
+	len -= 4;
+
+	/* NOTE:  audio endpoint extensions not accepted here;
+	 * just don't include the extra bytes.
+	 */
+
+	/* full/low speed descriptor, then high speed */
+	if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
+		goto fail1;
+	}
+	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
+			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
+		goto fail0;
+	if (len != USB_DT_ENDPOINT_SIZE) {
+		if (len != 2 * USB_DT_ENDPOINT_SIZE)
+			goto fail0;
+		if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
+					USB_DT_ENDPOINT_SIZE)) {
+			goto fail1;
+		}
+		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
+				|| data->hs_desc.bDescriptorType
+					!= USB_DT_ENDPOINT) {
+			DBG(data->dev, "config %s, bad hs length or type\n",
+					data->name);
+			goto fail0;
+		}
+	}
+	value = len;
+
+	spin_lock_irq (&data->dev->lock);
+	if (data->dev->state == STATE_DEV_UNBOUND) {
+		value = -ENOENT;
+		goto gone;
+	} else if ((ep = data->ep) == NULL) {
+		value = -ENODEV;
+		goto gone;
+	}
+	switch (data->dev->gadget->speed) {
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+		value = usb_ep_enable (ep, &data->desc);
+		if (value == 0)
+			data->state = STATE_EP_ENABLED;
+		break;
+#ifdef	HIGHSPEED
+	case USB_SPEED_HIGH:
+		/* fails if caller didn't provide that descriptor... */
+		value = usb_ep_enable (ep, &data->hs_desc);
+		if (value == 0)
+			data->state = STATE_EP_ENABLED;
+		break;
+#endif
+	default:
+		DBG (data->dev, "unconnected, %s init deferred\n",
+				data->name);
+		data->state = STATE_EP_DEFER_ENABLE;
+	}
+	if (value == 0)
+		fd->f_op = &ep_io_operations;
+gone:
+	spin_unlock_irq (&data->dev->lock);
+	if (value < 0) {
+fail:
+		data->desc.bDescriptorType = 0;
+		data->hs_desc.bDescriptorType = 0;
+	}
+	up (&data->lock);
+	return value;
+fail0:
+	value = -EINVAL;
+	goto fail;
+fail1:
+	value = -EFAULT;
+	goto fail;
+}
+
+static int
+ep_open (struct inode *inode, struct file *fd)
+{
+	struct ep_data		*data = inode->u.generic_ip;
+	int			value = -EBUSY;
+
+	if (down_interruptible (&data->lock) != 0)
+		return -EINTR;
+	spin_lock_irq (&data->dev->lock);
+	if (data->dev->state == STATE_DEV_UNBOUND)
+		value = -ENOENT;
+	else if (data->state == STATE_EP_DISABLED) {
+		value = 0;
+		data->state = STATE_EP_READY;
+		get_ep (data);
+		fd->private_data = data;
+		VDEBUG (data->dev, "%s ready\n", data->name);
+	} else
+		DBG (data->dev, "%s state %d\n",
+			data->name, data->state);
+	spin_unlock_irq (&data->dev->lock);
+	up (&data->lock);
+	return value;
+}
+
+/* used before endpoint configuration */
+static struct file_operations ep_config_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		ep_open,
+	.write =	ep_config,
+	.release =	ep_release,
+};
+
+/*----------------------------------------------------------------------*/
+
+/* EP0 IMPLEMENTATION can be partly in userspace.
+ *
+ * Drivers that use this facility receive various events, including
+ * control requests the kernel doesn't handle.  Drivers that don't
+ * use this facility may be too simple-minded for real applications.
+ */
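+
+/* Illustrative userspace sketch (not part of this driver): after the
+ * device descriptors have been written, the same ep0 fd reports events;
+ * handle_setup() is a hypothetical helper that reads or writes this fd
+ * to move the control transfer along.
+ *
+ *	struct usb_gadgetfs_event	ev [5];
+ *	int				i, n;
+ *
+ *	n = read (fd, ev, sizeof ev);
+ *	for (i = 0; n > 0 && i < n / (int) sizeof ev [0]; i++) {
+ *		switch (ev [i].type) {
+ *		case GADGETFS_SETUP:
+ *			handle_setup (fd, &ev [i].u.setup);
+ *			break;
+ *		case GADGETFS_CONNECT:
+ *		case GADGETFS_DISCONNECT:
+ *		case GADGETFS_SUSPEND:
+ *		default:
+ *			break;
+ *		}
+ *	}
+ */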
+
+static inline void ep0_readable (struct dev_data *dev)
+{
+	wake_up (&dev->wait);
+	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
+}
+
+static void clean_req (struct usb_ep *ep, struct usb_request *req)
+{
+	struct dev_data		*dev = ep->driver_data;
+
+	if (req->buf != dev->rbuf) {
+		usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
+		req->buf = dev->rbuf;
+		req->dma = DMA_ADDR_INVALID;
+	}
+	req->complete = epio_complete;
+	dev->setup_out_ready = 0;
+}
+
+static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct dev_data		*dev = ep->driver_data;
+	int			free = 1;
+
+	/* for control OUT, data must still get to userspace */
+	if (!dev->setup_in) {
+		dev->setup_out_error = (req->status != 0);
+		if (!dev->setup_out_error)
+			free = 0;
+		dev->setup_out_ready = 1;
+		ep0_readable (dev);
+	} else if (dev->state == STATE_SETUP)
+		dev->state = STATE_CONNECTED;
+
+	/* clean up as appropriate */
+	if (free && req->buf != dev->rbuf)
+		clean_req (ep, req);
+	req->complete = epio_complete;
+}
+
+static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
+{
+	struct dev_data	*dev = ep->driver_data;
+
+	if (dev->setup_out_ready) {
+		DBG (dev, "ep0 request busy!\n");
+		return -EBUSY;
+	}
+	if (len > sizeof (dev->rbuf))
+		req->buf = usb_ep_alloc_buffer (ep, len, &req->dma, GFP_ATOMIC);
+	if (req->buf == 0) {
+		req->buf = dev->rbuf;
+		return -ENOMEM;
+	}
+	req->complete = ep0_complete;
+	req->length = len;
+	return 0;
+}
+
+static ssize_t
+ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+{
+	struct dev_data			*dev = fd->private_data;
+	ssize_t				retval;
+	enum ep0_state			state;
+
+	spin_lock_irq (&dev->lock);
+
+	/* report fd mode change before acting on it */
+	if (dev->setup_abort) {
+		dev->setup_abort = 0;
+		retval = -EIDRM;
+		goto done;
+	}
+
+	/* control DATA stage */
+	if ((state = dev->state) == STATE_SETUP) {
+
+		if (dev->setup_in) {		/* stall IN */
+			VDEBUG(dev, "ep0in stall\n");
+			(void) usb_ep_set_halt (dev->gadget->ep0);
+			retval = -EL2HLT;
+			dev->state = STATE_CONNECTED;
+
+		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
+			struct usb_ep		*ep = dev->gadget->ep0;
+			struct usb_request	*req = dev->req;
+
+			if ((retval = setup_req (ep, req, 0)) == 0)
+				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+			dev->state = STATE_CONNECTED;
+
+			/* assume that was SET_CONFIGURATION */
+			if (dev->current_config) {
+				unsigned power;
+#ifdef	HIGHSPEED
+				if (dev->gadget->speed == USB_SPEED_HIGH)
+					power = dev->hs_config->bMaxPower;
+				else
+#endif
+					power = dev->config->bMaxPower;
+				usb_gadget_vbus_draw(dev->gadget, 2 * power);
+			}
+
+		} else {			/* collect OUT data */
+			if ((fd->f_flags & O_NONBLOCK) != 0
+					&& !dev->setup_out_ready) {
+				retval = -EAGAIN;
+				goto done;
+			}
+			spin_unlock_irq (&dev->lock);
+			retval = wait_event_interruptible (dev->wait,
+					dev->setup_out_ready != 0);
+
+			/* FIXME state could change from under us */
+			spin_lock_irq (&dev->lock);
+			if (retval)
+				goto done;
+			if (dev->setup_out_error)
+				retval = -EIO;
+			else {
+				len = min (len, (size_t)dev->req->actual);
+// FIXME don't call this with the spinlock held ...
+				if (copy_to_user (buf, dev->req->buf, len))
+					retval = -EFAULT;
+				clean_req (dev->gadget->ep0, dev->req);
+				/* NOTE userspace can't yet choose to stall */
+			}
+		}
+		goto done;
+	}
+
+	/* else normal: return event data */
+	if (len < sizeof dev->event [0]) {
+		retval = -EINVAL;
+		goto done;
+	}
+	len -= len % sizeof (struct usb_gadgetfs_event);
+	dev->usermode_setup = 1;
+
+scan:
+	/* return queued events right away */
+	if (dev->ev_next != 0) {
+		unsigned		i, n;
+		int			tmp = dev->ev_next;
+
+		len = min (len, tmp * sizeof (struct usb_gadgetfs_event));
+		n = len / sizeof (struct usb_gadgetfs_event);
+
+		/* ep0 can't deliver events when STATE_SETUP */
+		for (i = 0; i < n; i++) {
+			if (dev->event [i].type == GADGETFS_SETUP) {
+				len = n = i + 1;
+				len *= sizeof (struct usb_gadgetfs_event);
+				n = 0;
+				break;
+			}
+		}
+		spin_unlock_irq (&dev->lock);
+		if (copy_to_user (buf, &dev->event, len))
+			retval = -EFAULT;
+		else
+			retval = len;
+		if (len > 0) {
+			len /= sizeof (struct usb_gadgetfs_event);
+
+			/* NOTE this doesn't guard against broken drivers;
+			 * concurrent ep0 readers may lose events.
+			 */
+			spin_lock_irq (&dev->lock);
+			dev->ev_next -= len;
+			if (dev->ev_next != 0)
+				memmove (&dev->event, &dev->event [len],
+					sizeof (struct usb_gadgetfs_event)
+						* (tmp - len));
+			if (n == 0)
+				dev->state = STATE_SETUP;
+			spin_unlock_irq (&dev->lock);
+		}
+		return retval;
+	}
+	if (fd->f_flags & O_NONBLOCK) {
+		retval = -EAGAIN;
+		goto done;
+	}
+
+	switch (state) {
+	default:
+		DBG (dev, "fail %s, state %d\n", __FUNCTION__, state);
+		retval = -ESRCH;
+		break;
+	case STATE_UNCONNECTED:
+	case STATE_CONNECTED:
+		spin_unlock_irq (&dev->lock);
+		DBG (dev, "%s wait\n", __FUNCTION__);
+
+		/* wait for events */
+		retval = wait_event_interruptible (dev->wait,
+				dev->ev_next != 0);
+		if (retval < 0)
+			return retval;
+		spin_lock_irq (&dev->lock);
+		goto scan;
+	}
+
+done:
+	spin_unlock_irq (&dev->lock);
+	return retval;
+}
+
+static struct usb_gadgetfs_event *
+next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
+{
+	struct usb_gadgetfs_event	*event;
+	unsigned			i;
+
+	switch (type) {
+	/* these events purge the queue */
+	case GADGETFS_DISCONNECT:
+		if (dev->state == STATE_SETUP)
+			dev->setup_abort = 1;
+		// FALL THROUGH
+	case GADGETFS_CONNECT:
+		dev->ev_next = 0;
+		break;
+	case GADGETFS_SETUP:		/* previous request timed out */
+	case GADGETFS_SUSPEND:		/* same effect */
+		/* these events can't be repeated */
+		for (i = 0; i != dev->ev_next; i++) {
+			if (dev->event [i].type != type)
+				continue;
+			DBG (dev, "discard old event %d\n", type);
+			dev->ev_next--;
+			if (i == dev->ev_next)
+				break;
+			/* indices start at zero, for simplicity */
+			memmove (&dev->event [i], &dev->event [i + 1],
+				sizeof (struct usb_gadgetfs_event)
+					* (dev->ev_next - i));
+		}
+		break;
+	default:
+		BUG ();
+	}
+	event = &dev->event [dev->ev_next++];
+	BUG_ON (dev->ev_next > N_EVENT);
+	VDEBUG (dev, "ev %d, next %d\n", type, dev->ev_next);
+	memset (event, 0, sizeof *event);
+	event->type = type;
+	return event;
+}
+
+static ssize_t
+ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct dev_data		*dev = fd->private_data;
+	ssize_t			retval = -ESRCH;
+
+	spin_lock_irq (&dev->lock);
+
+	/* report fd mode change before acting on it */
+	if (dev->setup_abort) {
+		dev->setup_abort = 0;
+		retval = -EIDRM;
+
+	/* data and/or status stage for control request */
+	} else if (dev->state == STATE_SETUP) {
+
+		/* IN DATA+STATUS caller makes len <= wLength */
+		if (dev->setup_in) {
+			retval = setup_req (dev->gadget->ep0, dev->req, len);
+			if (retval == 0) {
+				spin_unlock_irq (&dev->lock);
+				if (copy_from_user (dev->req->buf, buf, len))
+					retval = -EFAULT;
+				else
+					retval = usb_ep_queue (
+						dev->gadget->ep0, dev->req,
+						GFP_KERNEL);
+				if (retval < 0) {
+					spin_lock_irq (&dev->lock);
+					clean_req (dev->gadget->ep0, dev->req);
+					spin_unlock_irq (&dev->lock);
+				} else
+					retval = len;
+
+				return retval;
+			}
+
+		/* can stall some OUT transfers */
+		} else if (dev->setup_can_stall) {
+			VDEBUG(dev, "ep0out stall\n");
+			(void) usb_ep_set_halt (dev->gadget->ep0);
+			retval = -EL2HLT;
+			dev->state = STATE_CONNECTED;
+		} else {
+			DBG(dev, "bogus ep0out stall!\n");
+		}
+	} else
+		DBG (dev, "fail %s, state %d\n", __FUNCTION__, dev->state);
+
+	spin_unlock_irq (&dev->lock);
+	return retval;
+}
+
+static int
+ep0_fasync (int f, struct file *fd, int on)
+{
+	struct dev_data		*dev = fd->private_data;
+	// caller must F_SETOWN before signal delivery happens
+	VDEBUG (dev, "%s %s\n", __FUNCTION__, on ? "on" : "off");
+	return fasync_helper (f, fd, on, &dev->fasync);
+}
+
+static struct usb_gadget_driver gadgetfs_driver;
+
+static int
+dev_release (struct inode *inode, struct file *fd)
+{
+	struct dev_data		*dev = fd->private_data;
+
+	/* closing ep0 === shutdown all */
+
+	usb_gadget_unregister_driver (&gadgetfs_driver);
+
+	/* at this point "good" hardware has disconnected the
+	 * device from USB; the host won't see it any more.
+	 * alternatively, all host requests will time out.
+	 */
+
+	fasync_helper (-1, fd, 0, &dev->fasync);
+	kfree (dev->buf);
+	dev->buf = NULL;
+	put_dev (dev);
+
+	/* other endpoints were all decoupled from this device */
+	dev->state = STATE_DEV_DISABLED;
+	return 0;
+}
+
+static int dev_ioctl (struct inode *inode, struct file *fd,
+		unsigned code, unsigned long value)
+{
+	struct dev_data		*dev = fd->private_data;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	if (gadget->ops->ioctl)
+		return gadget->ops->ioctl (gadget, code, value);
+	return -ENOTTY;
+}
+
+/* used after device configuration */
+static struct file_operations ep0_io_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.read =		ep0_read,
+	.write =	ep0_write,
+	.fasync =	ep0_fasync,
+	// .poll =	ep0_poll,
+	.ioctl =	dev_ioctl,
+	.release =	dev_release,
+};
+
+/*----------------------------------------------------------------------*/
+
+/* The in-kernel gadget driver handles most ep0 issues, in particular
+ * enumerating the single configuration (as provided from user space).
+ *
+ * Unrecognized ep0 requests may be handled in user space.
+ */
+
+#ifdef	HIGHSPEED
+static void make_qualifier (struct dev_data *dev)
+{
+	struct usb_qualifier_descriptor		qual;
+	struct usb_device_descriptor		*desc;
+
+	qual.bLength = sizeof qual;
+	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+	qual.bcdUSB = __constant_cpu_to_le16 (0x0200);
+
+	desc = dev->dev;
+	qual.bDeviceClass = desc->bDeviceClass;
+	qual.bDeviceSubClass = desc->bDeviceSubClass;
+	qual.bDeviceProtocol = desc->bDeviceProtocol;
+
+	/* assumes ep0 uses the same value for both speeds ... */
+	qual.bMaxPacketSize0 = desc->bMaxPacketSize0;
+
+	qual.bNumConfigurations = 1;
+	qual.bRESERVED = 0;
+
+	memcpy (dev->rbuf, &qual, sizeof qual);
+}
+#endif
+
+static int
+config_buf (struct dev_data *dev, u8 type, unsigned index)
+{
+	int		len;
+#ifdef HIGHSPEED
+	int		hs;
+#endif
+
+	/* only one configuration */
+	if (index > 0)
+		return -EINVAL;
+
+#ifdef HIGHSPEED
+	hs = (dev->gadget->speed == USB_SPEED_HIGH);
+	if (type == USB_DT_OTHER_SPEED_CONFIG)
+		hs = !hs;
+	if (hs) {
+		dev->req->buf = dev->hs_config;
+		len = le16_to_cpup (&dev->hs_config->wTotalLength);
+	} else
+#endif
+	{
+		dev->req->buf = dev->config;
+		len = le16_to_cpup (&dev->config->wTotalLength);
+	}
+	((u8 *)dev->req->buf) [1] = type;
+	return len;
+}
+
+static int
+gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct dev_data			*dev = get_gadget_data (gadget);
+	struct usb_request		*req = dev->req;
+	int				value = -EOPNOTSUPP;
+	struct usb_gadgetfs_event	*event;
+	u16				w_value = ctrl->wValue;
+	u16				w_length = ctrl->wLength;
+
+	spin_lock (&dev->lock);
+	dev->setup_abort = 0;
+	if (dev->state == STATE_UNCONNECTED) {
+		struct usb_ep	*ep;
+		struct ep_data	*data;
+
+		dev->state = STATE_CONNECTED;
+		dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
+
+#ifdef	HIGHSPEED
+		if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == 0) {
+			ERROR (dev, "no high speed config??\n");
+			return -EINVAL;
+		}
+#endif	/* HIGHSPEED */
+
+		INFO (dev, "connected\n");
+		event = next_event (dev, GADGETFS_CONNECT);
+		event->u.speed = gadget->speed;
+		ep0_readable (dev);
+
+		list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+			data = ep->driver_data;
+			/* ... down_trylock (&data->lock) ... */
+			if (data->state != STATE_EP_DEFER_ENABLE)
+				continue;
+#ifdef	HIGHSPEED
+			if (gadget->speed == USB_SPEED_HIGH)
+				value = usb_ep_enable (ep, &data->hs_desc);
+			else
+#endif	/* HIGHSPEED */
+				value = usb_ep_enable (ep, &data->desc);
+			if (value) {
+				ERROR (dev, "deferred %s enable --> %d\n",
+					data->name, value);
+				continue;
+			}
+			data->state = STATE_EP_ENABLED;
+			wake_up (&data->wait);
+			DBG (dev, "woke up %s waiters\n", data->name);
+		}
+
+	/* host may have given up waiting for response.  we can miss control
+	 * requests handled lower down (device/endpoint status and features);
+	 * then ep0_{read,write} will report the wrong status. controller
+	 * driver will have aborted pending i/o.
+	 */
+	} else if (dev->state == STATE_SETUP)
+		dev->setup_abort = 1;
+
+	req->buf = dev->rbuf;
+	req->dma = DMA_ADDR_INVALID;
+	req->context = NULL;
+	value = -EOPNOTSUPP;
+	switch (ctrl->bRequest) {
+
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unrecognized;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			value = min (w_length, (u16) sizeof *dev->dev);
+			req->buf = dev->dev;
+			break;
+#ifdef	HIGHSPEED
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!dev->hs_config)
+				break;
+			value = min (w_length, (u16)
+				sizeof (struct usb_qualifier_descriptor));
+			make_qualifier (dev);
+			break;
+		case USB_DT_OTHER_SPEED_CONFIG:
+			// FALLTHROUGH
+#endif
+		case USB_DT_CONFIG:
+			value = config_buf (dev,
+					w_value >> 8,
+					w_value & 0xff);
+			if (value >= 0)
+				value = min (w_length, (u16) value);
+			break;
+		case USB_DT_STRING:
+			goto unrecognized;
+
+		default:		// all others are errors
+			break;
+		}
+		break;
+
+	/* currently one config, two speeds */
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != 0)
+			break;
+		if (0 == (u8) w_value) {
+			value = 0;
+			dev->current_config = 0;
+			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
+			// user mode expected to disable endpoints
+		} else {
+			u8	config, power;
+#ifdef	HIGHSPEED
+			if (gadget->speed == USB_SPEED_HIGH) {
+				config = dev->hs_config->bConfigurationValue;
+				power = dev->hs_config->bMaxPower;
+			} else
+#endif
+			{
+				config = dev->config->bConfigurationValue;
+				power = dev->config->bMaxPower;
+			}
+
+			if (config == (u8) w_value) {
+				value = 0;
+				dev->current_config = config;
+				usb_gadget_vbus_draw(gadget, 2 * power);
+			}
+		}
+
+		/* report SET_CONFIGURATION like any other control request,
+		 * except that usermode may not stall this.  the next
+		 * request mustn't be allowed to start until this finishes:
+		 * endpoints and threads set up, etc.
+		 *
+		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
+		 * has bad/racy automagic that prevents synchronizing here.
+		 * even kernel mode drivers often miss them.
+		 */
+		if (value == 0) {
+			INFO (dev, "configuration #%d\n", dev->current_config);
+			if (dev->usermode_setup) {
+				dev->setup_can_stall = 0;
+				goto delegate;
+			}
+		}
+		break;
+
+#ifndef	CONFIG_USB_GADGETFS_PXA2XX
+	/* PXA automagically handles this request too */
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != 0x80)
+			break;
+		*(u8 *)req->buf = dev->current_config;
+		value = min (w_length, (u16) 1);
+		break;
+#endif
+
+	default:
+unrecognized:
+		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
+			dev->usermode_setup ? "delegate" : "fail",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, le16_to_cpu(ctrl->wIndex), w_length);
+
+		/* if there's an ep0 reader, don't stall */
+		if (dev->usermode_setup) {
+			dev->setup_can_stall = 1;
+delegate:
+			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
+						? 1 : 0;
+			dev->setup_out_ready = 0;
+			dev->setup_out_error = 0;
+			value = 0;
+
+			/* read DATA stage for OUT right away */
+			if (unlikely (!dev->setup_in && w_length)) {
+				value = setup_req (gadget->ep0, dev->req,
+							w_length);
+				if (value < 0)
+					break;
+				value = usb_ep_queue (gadget->ep0, dev->req,
+							GFP_ATOMIC);
+				if (value < 0) {
+					clean_req (gadget->ep0, dev->req);
+					break;
+				}
+
+				/* we can't currently stall these */
+				dev->setup_can_stall = 0;
+			}
+
+			/* state changes when reader collects event */
+			event = next_event (dev, GADGETFS_SETUP);
+			event->u.setup = *ctrl;
+			ep0_readable (dev);
+			spin_unlock (&dev->lock);
+			return 0;
+		}
+	}
+
+	/* proceed with data transfer and status phases? */
+	if (value >= 0 && dev->state != STATE_SETUP) {
+		req->length = value;
+		req->zero = value < w_length;
+		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG (dev, "ep_queue --> %d\n", value);
+			req->status = 0;
+		}
+	}
+
+	/* device stalls when value < 0 */
+	spin_unlock (&dev->lock);
+	return value;
+}
+
+static void destroy_ep_files (struct dev_data *dev)
+{
+	struct list_head	*entry, *tmp;
+
+	DBG (dev, "%s %d\n", __FUNCTION__, dev->state);
+
+	/* dev->state must prevent interference */
+restart:
+	spin_lock_irq (&dev->lock);
+	list_for_each_safe (entry, tmp, &dev->epfiles) {
+		struct ep_data	*ep;
+		struct inode	*parent;
+		struct dentry	*dentry;
+
+		/* break link to FS */
+		ep = list_entry (entry, struct ep_data, epfiles);
+		list_del_init (&ep->epfiles);
+		dentry = ep->dentry;
+		ep->dentry = NULL;
+		parent = dentry->d_parent->d_inode;
+
+		/* break link to controller */
+		if (ep->state == STATE_EP_ENABLED)
+			(void) usb_ep_disable (ep->ep);
+		ep->state = STATE_EP_UNBOUND;
+		usb_ep_free_request (ep->ep, ep->req);
+		ep->ep = NULL;
+		wake_up (&ep->wait);
+		put_ep (ep);
+
+		spin_unlock_irq (&dev->lock);
+
+		/* break link to dcache */
+		down (&parent->i_sem);
+		d_delete (dentry);
+		dput (dentry);
+		up (&parent->i_sem);
+
+		/* fds may still be open */
+		goto restart;
+	}
+	spin_unlock_irq (&dev->lock);
+}
+
+
+static struct inode *
+gadgetfs_create_file (struct super_block *sb, char const *name,
+		void *data, struct file_operations *fops,
+		struct dentry **dentry_p);
+
+static int activate_ep_files (struct dev_data *dev)
+{
+	struct usb_ep	*ep;
+
+	gadget_for_each_ep (ep, dev->gadget) {
+		struct ep_data	*data;
+
+		data = kmalloc (sizeof *data, GFP_KERNEL);
+		if (!data)
+			goto enomem;
+		memset (data, 0, sizeof *data);
+		data->state = STATE_EP_DISABLED;
+		init_MUTEX (&data->lock);
+		init_waitqueue_head (&data->wait);
+
+		strncpy (data->name, ep->name, sizeof (data->name) - 1);
+		atomic_set (&data->count, 1);
+		data->dev = dev;
+		get_dev (dev);
+
+		data->ep = ep;
+		ep->driver_data = data;
+
+		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
+		if (!data->req)
+			goto enomem;
+
+		data->inode = gadgetfs_create_file (dev->sb, data->name,
+				data, &ep_config_operations,
+				&data->dentry);
+		if (!data->inode) {
+			kfree (data);
+			goto enomem;
+		}
+		list_add_tail (&data->epfiles, &dev->epfiles);
+	}
+	return 0;
+
+enomem:
+	DBG (dev, "%s enomem\n", __FUNCTION__);
+	destroy_ep_files (dev);
+	return -ENOMEM;
+}
+
+static void
+gadgetfs_unbind (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = get_gadget_data (gadget);
+
+	DBG (dev, "%s\n", __FUNCTION__);
+
+	spin_lock_irq (&dev->lock);
+	dev->state = STATE_DEV_UNBOUND;
+	spin_unlock_irq (&dev->lock);
+
+	destroy_ep_files (dev);
+	gadget->ep0->driver_data = NULL;
+	set_gadget_data (gadget, NULL);
+
+	/* we've already been disconnected ... no i/o is active */
+	if (dev->req)
+		usb_ep_free_request (gadget->ep0, dev->req);
+	DBG (dev, "%s done\n", __FUNCTION__);
+	put_dev (dev);
+}
+
+static struct dev_data		*the_device;
+
+static int
+gadgetfs_bind (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = the_device;
+
+	if (!dev)
+		return -ESRCH;
+	if (0 != strcmp (CHIP, gadget->name)) {
+		printk (KERN_ERR "%s expected %s controller not %s\n",
+			shortname, CHIP, gadget->name);
+		return -ENODEV;
+	}
+
+	set_gadget_data (gadget, dev);
+	dev->gadget = gadget;
+	gadget->ep0->driver_data = dev;
+	dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
+
+	/* preallocate control response and buffer */
+	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
+	if (!dev->req)
+		goto enomem;
+	dev->req->context = NULL;
+	dev->req->complete = epio_complete;
+
+	if (activate_ep_files (dev) < 0)
+		goto enomem;
+
+	INFO (dev, "bound to %s driver\n", gadget->name);
+	dev->state = STATE_UNCONNECTED;
+	get_dev (dev);
+	return 0;
+
+enomem:
+	gadgetfs_unbind (gadget);
+	return -ENOMEM;
+}
+
+static void
+gadgetfs_disconnect (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = get_gadget_data (gadget);
+
+	if (dev->state == STATE_UNCONNECTED) {
+		DBG (dev, "already unconnected\n");
+		return;
+	}
+	dev->state = STATE_UNCONNECTED;
+
+	INFO (dev, "disconnected\n");
+	spin_lock (&dev->lock);
+	next_event (dev, GADGETFS_DISCONNECT);
+	ep0_readable (dev);
+	spin_unlock (&dev->lock);
+}
+
+static void
+gadgetfs_suspend (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = get_gadget_data (gadget);
+
+	INFO (dev, "suspended from state %d\n", dev->state);
+	spin_lock (&dev->lock);
+	switch (dev->state) {
+	case STATE_SETUP:		// VERY odd... host died??
+	case STATE_CONNECTED:
+	case STATE_UNCONNECTED:
+		next_event (dev, GADGETFS_SUSPEND);
+		ep0_readable (dev);
+		/* FALLTHROUGH */
+	default:
+		break;
+	}
+	spin_unlock (&dev->lock);
+}
+
+static struct usb_gadget_driver gadgetfs_driver = {
+#ifdef	HIGHSPEED
+	.speed		= USB_SPEED_HIGH,
+#else
+	.speed		= USB_SPEED_FULL,
+#endif
+	.function	= (char *) driver_desc,
+	.bind		= gadgetfs_bind,
+	.unbind		= gadgetfs_unbind,
+	.setup		= gadgetfs_setup,
+	.disconnect	= gadgetfs_disconnect,
+	.suspend	= gadgetfs_suspend,
+
+	.driver 	= {
+		.name		= (char *) shortname,
+		// .shutdown = ...
+		// .suspend = ...
+		// .resume = ...
+	},
+};
+
+/*----------------------------------------------------------------------*/
+
+static void gadgetfs_nop(struct usb_gadget *arg) { }
+
+static int gadgetfs_probe (struct usb_gadget *gadget)
+{
+	CHIP = gadget->name;
+	return -EISNAM;
+}
+
+static struct usb_gadget_driver probe_driver = {
+	.speed		= USB_SPEED_HIGH,
+	.bind		= gadgetfs_probe,
+	.unbind		= gadgetfs_nop,
+	.setup		= (void *)gadgetfs_nop,
+	.disconnect	= gadgetfs_nop,
+	.driver 	= {
+		.name		= "nop",
+	},
+};
+
+
+/* DEVICE INITIALIZATION
+ *
+ *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
+ *     status = write (fd, descriptors, sizeof descriptors)
+ *
+ * That write establishes the device configuration, so the kernel can
+ * bind to the controller ... guaranteeing it can handle enumeration
+ * at all necessary speeds.  Descriptor order is:
+ *
+ * . message tag (u32, host order) ... for now, must be zero; it
+ *	would change to support features like multi-config devices
+ * . full/low speed config ... all wTotalLength bytes (with interface,
+ *	class, altsetting, endpoint, and other descriptors)
+ * . high speed config ... all descriptors, for high speed operation;
+ * 	this one's optional except for high-speed hardware
+ * . device descriptor
+ *
+ * Endpoints are not yet enabled. Drivers may want to immediately
+ * initialize them, using the /dev/gadget/ep* files that are available
+ * as soon as the kernel sees the configuration, or they can wait
+ * until device configuration and interface altsetting changes create
+ * the need to configure (or unconfigure) them.
+ *
+ * After initialization, the device stays active for as long as that
+ * $CHIP file is open.  Events may then be read from that descriptor,
+ * such configuration notifications.  More complex drivers will handle
+ * some control requests in user space.
+ */
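+
+/* Illustrative userspace sketch (not part of this driver); the buffer
+ * layout mirrors what dev_config() parses, and the descriptor variables
+ * and controller name are hypothetical:
+ *
+ *	u32	tag = 0;
+ *	char	buf [4096], *cp = buf;
+ *
+ *	memcpy (cp, &tag, 4);				cp += 4;
+ *	memcpy (cp, fs_config, fs_config_len);		cp += fs_config_len;
+ *	// optional, only for dual speed hardware:
+ *	// memcpy (cp, hs_config, hs_config_len);	cp += hs_config_len;
+ *	memcpy (cp, &device_desc, USB_DT_DEVICE_SIZE);	cp += USB_DT_DEVICE_SIZE;
+ *
+ *	fd = open ("/dev/gadget/net2280", O_RDWR);	// $CHIP name varies
+ *	status = write (fd, buf, cp - buf);
+ */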
+
+static int is_valid_config (struct usb_config_descriptor *config)
+{
+	return config->bDescriptorType == USB_DT_CONFIG
+		&& config->bLength == USB_DT_CONFIG_SIZE
+		&& config->bConfigurationValue != 0
+		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
+		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
+	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
+	/* FIXME check lengths: walk to end */
+}
+
+static ssize_t
+dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct dev_data		*dev = fd->private_data;
+	ssize_t			value = len, length = len;
+	unsigned		total;
+	u32			tag;
+	char			*kbuf;
+
+	if (dev->state != STATE_OPENED)
+		return -EEXIST;
+
+	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+		return -EINVAL;
+
+	/* we might need to change message format someday */
+	if (copy_from_user (&tag, buf, 4))
+		return -EFAULT;
+	if (tag != 0)
+		return -EINVAL;
+	buf += 4;
+	length -= 4;
+
+	kbuf = kmalloc (length, SLAB_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+	if (copy_from_user (kbuf, buf, length)) {
+		kfree (kbuf);
+		return -EFAULT;
+	}
+
+	spin_lock_irq (&dev->lock);
+	value = -EINVAL;
+	if (dev->buf)
+		goto fail;
+	dev->buf = kbuf;
+
+	/* full or low speed config */
+	dev->config = (void *) kbuf;
+	total = le16_to_cpup (&dev->config->wTotalLength);
+	if (!is_valid_config (dev->config) || total >= length)
+		goto fail;
+	kbuf += total;
+	length -= total;
+
+	/* optional high speed config */
+	if (kbuf [1] == USB_DT_CONFIG) {
+		dev->hs_config = (void *) kbuf;
+		total = le16_to_cpup (&dev->hs_config->wTotalLength);
+		if (!is_valid_config (dev->hs_config) || total >= length)
+			goto fail;
+		kbuf += total;
+		length -= total;
+	}
+
+	/* could support multiple configs, using another encoding! */
+
+	/* device descriptor (tweaked for paranoia) */
+	if (length != USB_DT_DEVICE_SIZE)
+		goto fail;
+	dev->dev = (void *)kbuf;
+	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
+			|| dev->dev->bDescriptorType != USB_DT_DEVICE
+			|| dev->dev->bNumConfigurations != 1)
+		goto fail;
+	dev->dev->bNumConfigurations = 1;
+	dev->dev->bcdUSB = __constant_cpu_to_le16 (0x0200);
+
+	/* triggers gadgetfs_bind(); then we can enumerate. */
+	spin_unlock_irq (&dev->lock);
+	value = usb_gadget_register_driver (&gadgetfs_driver);
+	if (value != 0) {
+		kfree (dev->buf);
+		dev->buf = NULL;
+	} else {
+		/* at this point "good" hardware has for the first time
+		 * let the USB host see us.  alternatively, if users
+		 * unplug/replug, that will clear all the error state.
+		 *
+		 * note:  everything running before here was guaranteed
+		 * to choke driver model style diagnostics.  from here
+		 * on, they can work ... except in cleanup paths that
+		 * kick in after the ep0 descriptor is closed.
+		 */
+		fd->f_op = &ep0_io_operations;
+		value = len;
+	}
+	return value;
+
+fail:
+	spin_unlock_irq (&dev->lock);
+	pr_debug ("%s: %s fail %Zd, %p\n", shortname, __FUNCTION__, value, dev);
+	kfree (dev->buf);
+	dev->buf = NULL;
+	return value;
+}
+
+static int
+dev_open (struct inode *inode, struct file *fd)
+{
+	struct dev_data		*dev = inode->u.generic_ip;
+	int			value = -EBUSY;
+
+	if (dev->state == STATE_DEV_DISABLED) {
+		dev->ev_next = 0;
+		dev->state = STATE_OPENED;
+		fd->private_data = dev;
+		get_dev (dev);
+		value = 0;
+	}
+	return value;
+}
+
+static struct file_operations dev_init_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		dev_open,
+	.write =	dev_config,
+	.fasync =	ep0_fasync,
+	.ioctl =	dev_ioctl,
+	.release =	dev_release,
+};
+
+/*----------------------------------------------------------------------*/
+
+/* FILESYSTEM AND SUPERBLOCK OPERATIONS
+ *
+ * Mounting the filesystem creates a controller file, used first for
+ * device configuration then later for event monitoring.
+ */
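+
+/* Illustrative userspace sketch (not part of this driver): the filesystem
+ * is normally mounted before any of the above, for example with
+ *
+ *	mkdir ("/dev/gadget", 0700);
+ *	mount ("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL);
+ *
+ * after which /dev/gadget/$CHIP appears and can be opened as described
+ * under DEVICE INITIALIZATION above.
+ */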
+
+
+/* FIXME PAM etc could set this security policy without mount options
+ * if epfiles inherited ownership and permissions from ep0 ...
+ */
+
+static unsigned default_uid;
+static unsigned default_gid;
+static unsigned default_perm = S_IRUSR | S_IWUSR;
+
+module_param (default_uid, uint, 0644);
+module_param (default_gid, uint, 0644);
+module_param (default_perm, uint, 0644);
+
+
+static struct inode *
+gadgetfs_make_inode (struct super_block *sb,
+		void *data, struct file_operations *fops,
+		int mode)
+{
+	struct inode *inode = new_inode (sb);
+
+	if (inode) {
+		inode->i_mode = mode;
+		inode->i_uid = default_uid;
+		inode->i_gid = default_gid;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_atime = inode->i_mtime = inode->i_ctime
+				= CURRENT_TIME;
+		inode->u.generic_ip = data;
+		inode->i_fop = fops;
+	}
+	return inode;
+}
+
+/* created in the fs root directory, so it's non-renamable and non-linkable;
+ * the inode and dentry stay paired until device reconfig.
+ */
+static struct inode *
+gadgetfs_create_file (struct super_block *sb, char const *name,
+		void *data, struct file_operations *fops,
+		struct dentry **dentry_p)
+{
+	struct dentry	*dentry;
+	struct inode	*inode;
+
+	dentry = d_alloc_name(sb->s_root, name);
+	if (!dentry)
+		return NULL;
+
+	inode = gadgetfs_make_inode (sb, data, fops,
+			S_IFREG | (default_perm & S_IRWXUGO));
+	if (!inode) {
+		dput(dentry);
+		return NULL;
+	}
+	d_add (dentry, inode);
+	*dentry_p = dentry;
+	return inode;
+}
+
+static struct super_operations gadget_fs_operations = {
+	.statfs =	simple_statfs,
+	.drop_inode =	generic_delete_inode,
+};
+
+static int
+gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
+{
+	struct inode	*inode;
+	struct dentry	*d;
+	struct dev_data	*dev;
+
+	if (the_device)
+		return -ESRCH;
+
+	/* fake probe to determine $CHIP */
+	(void) usb_gadget_register_driver (&probe_driver);
+	if (!CHIP)
+		return -ENODEV;
+
+	/* superblock */
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = GADGETFS_MAGIC;
+	sb->s_op = &gadget_fs_operations;
+	sb->s_time_gran = 1;
+
+	/* root inode */
+	inode = gadgetfs_make_inode (sb,
+			NULL, &simple_dir_operations,
+			S_IFDIR | S_IRUGO | S_IXUGO);
+	if (!inode)
+		return -ENOMEM;
+	inode->i_op = &simple_dir_inode_operations;
+	if (!(d = d_alloc_root (inode))) {
+		iput (inode);
+		return -ENOMEM;
+	}
+	sb->s_root = d;
+
+	/* the ep0 file is named after the controller we expect;
+	 * user mode code can use it for sanity checks, like we do.
+	 */
+	dev = dev_new ();
+	if (!dev)
+		return -ENOMEM;
+
+	dev->sb = sb;
+	if (!(inode = gadgetfs_create_file (sb, CHIP,
+				dev, &dev_init_operations,
+				&dev->dentry))) {
+		put_dev(dev);
+		return -ENOMEM;
+	}
+
+	/* other endpoint files are available after hardware setup,
+	 * from binding to a controller.
+	 */
+	the_device = dev;
+	return 0;
+}
+
+/* "mount -t gadgetfs path /dev/gadget" ends up here */
+static struct super_block *
+gadgetfs_get_sb (struct file_system_type *t, int flags,
+		const char *path, void *opts)
+{
+	return get_sb_single (t, flags, opts, gadgetfs_fill_super);
+}
+
+static void
+gadgetfs_kill_sb (struct super_block *sb)
+{
+	kill_litter_super (sb);
+	if (the_device) {
+		put_dev (the_device);
+		the_device = NULL;
+	}
+}
+
+/*----------------------------------------------------------------------*/
+
+static struct file_system_type gadgetfs_type = {
+	.owner		= THIS_MODULE,
+	.name		= shortname,
+	.get_sb		= gadgetfs_get_sb,
+	.kill_sb	= gadgetfs_kill_sb,
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __init init (void)
+{
+	int status;
+
+	status = register_filesystem (&gadgetfs_type);
+	if (status == 0)
+		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
+			shortname, driver_desc);
+	return status;
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	pr_debug ("unregister %s\n", shortname);
+	unregister_filesystem (&gadgetfs_type);
+}
+module_exit (cleanup);
+
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
new file mode 100644
index 0000000..0def9f7
--- /dev/null
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -0,0 +1,2167 @@
+/*
+ * linux/drivers/usb/gadget/lh7a40x_udc.c
+ * Sharp LH7A40x on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID
+ * Copyright (C) 2004 Bo Henriksen, Nordic ID
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include "lh7a40x_udc.h"
+
+//#define DEBUG printk
+//#define DEBUG_EP0 printk
+//#define DEBUG_SETUP printk
+
+#ifndef DEBUG_EP0
+# define DEBUG_EP0(fmt,args...)
+#endif
+#ifndef DEBUG_SETUP
+# define DEBUG_SETUP(fmt,args...)
+#endif
+#ifndef DEBUG
+# define NO_STATES
+# define DEBUG(fmt,args...)
+#endif
+
+#define	DRIVER_DESC			"LH7A40x USB Device Controller"
+#define	DRIVER_VERSION		__DATE__
+
+#ifndef _BIT			/* FIXME - what happened to _BIT in 2.6.7bk18? */
+#define _BIT(x) (1<<(x))
+#endif
+
+struct lh7a40x_udc *the_controller;
+
+static const char driver_name[] = "lh7a40x_udc";
+static const char driver_desc[] = DRIVER_DESC;
+static const char ep0name[] = "ep0-control";
+
+/*
+  Local definitions.
+*/
+
+#ifndef NO_STATES
+static char *state_names[] = {
+	"WAIT_FOR_SETUP",
+	"DATA_STATE_XMIT",
+	"DATA_STATE_NEED_ZLP",
+	"WAIT_FOR_OUT_STATUS",
+	"DATA_STATE_RECV"
+};
+#endif
+
+/*
+  Local declarations.
+*/
+static int lh7a40x_ep_enable(struct usb_ep *ep,
+			     const struct usb_endpoint_descriptor *);
+static int lh7a40x_ep_disable(struct usb_ep *ep);
+static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, int);
+static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
+static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *,
+				  int);
+static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t,
+				unsigned);
+static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, int);
+static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
+static int lh7a40x_set_halt(struct usb_ep *ep, int);
+static int lh7a40x_fifo_status(struct usb_ep *ep);
+static void lh7a40x_fifo_flush(struct usb_ep *ep);
+static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep);
+static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr);
+
+static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req,
+		 int status);
+static void pio_irq_enable(int bEndpointAddress);
+static void pio_irq_disable(int bEndpointAddress);
+static void stop_activity(struct lh7a40x_udc *dev,
+			  struct usb_gadget_driver *driver);
+static void flush(struct lh7a40x_ep *ep);
+static void udc_enable(struct lh7a40x_udc *dev);
+static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address);
+
+static struct usb_ep_ops lh7a40x_ep_ops = {
+	.enable = lh7a40x_ep_enable,
+	.disable = lh7a40x_ep_disable,
+
+	.alloc_request = lh7a40x_alloc_request,
+	.free_request = lh7a40x_free_request,
+
+	.alloc_buffer = lh7a40x_alloc_buffer,
+	.free_buffer = lh7a40x_free_buffer,
+
+	.queue = lh7a40x_queue,
+	.dequeue = lh7a40x_dequeue,
+
+	.set_halt = lh7a40x_set_halt,
+	.fifo_status = lh7a40x_fifo_status,
+	.fifo_flush = lh7a40x_fifo_flush,
+};
+
+/* Inline code */
+
+static __inline__ int write_packet(struct lh7a40x_ep *ep,
+				   struct lh7a40x_request *req, int max)
+{
+	u8 *buf;
+	int length, count;
+	volatile u32 *fifo = (volatile u32 *)ep->fifo;
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	length = req->req.length - req->req.actual;
+	length = min(length, max);
+	req->req.actual += length;
+
+	DEBUG("Write %d (max %d), fifo %p\n", length, max, fifo);
+
+	count = length;
+	while (count--) {
+		*fifo = *buf++;
+	}
+
+	return length;
+}
+
+static __inline__ void usb_set_index(u32 ep)
+{
+	*(volatile u32 *)io_p2v(USB_INDEX) = ep;
+}
+
+static __inline__ u32 usb_read(u32 port)
+{
+	return *(volatile u32 *)io_p2v(port);
+}
+
+static __inline__ void usb_write(u32 val, u32 port)
+{
+	*(volatile u32 *)io_p2v(port) = val;
+}
+
+static __inline__ void usb_set(u32 val, u32 port)
+{
+	volatile u32 *ioport = (volatile u32 *)io_p2v(port);
+	u32 after = (*ioport) | val;
+	*ioport = after;
+}
+
+static __inline__ void usb_clear(u32 val, u32 port)
+{
+	volatile u32 *ioport = (volatile u32 *)io_p2v(port);
+	u32 after = (*ioport) & ~val;
+	*ioport = after;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define GPIO_PORTC_DR 	(0x80000E08)
+#define GPIO_PORTC_DDR 	(0x80000E18)
+#define GPIO_PORTC_PDR 	(0x80000E70)
+
+/* get port C pin data register */
+#define get_portc_pdr(bit) 		((usb_read(GPIO_PORTC_PDR) & _BIT(bit)) != 0)
+/* get port C data direction register */
+#define get_portc_ddr(bit) 		((usb_read(GPIO_PORTC_DDR) & _BIT(bit)) != 0)
+/* set port C data register */
+#define set_portc_dr(bit, val) 	(val ? usb_set(_BIT(bit), GPIO_PORTC_DR) : usb_clear(_BIT(bit), GPIO_PORTC_DR))
+/* set port C data direction register */
+#define set_portc_ddr(bit, val) (val ? usb_set(_BIT(bit), GPIO_PORTC_DDR) : usb_clear(_BIT(bit), GPIO_PORTC_DDR))
+
+/*
+ * LPD7A404 GPIO's:
+ * Port C bit 1 = USB Port 1 Power Enable
+ * Port C bit 2 = USB Port 1 Data Carrier Detect
+ */
+#define is_usb_connected() 		get_portc_pdr(2)
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static const char proc_node_name[] = "driver/udc";
+
+static int
+udc_proc_read(char *page, char **start, off_t off, int count,
+	      int *eof, void *_dev)
+{
+	char *buf = page;
+	struct lh7a40x_udc *dev = _dev;
+	char *next = buf;
+	unsigned size = count;
+	unsigned long flags;
+	int t;
+
+	if (off != 0)
+		return 0;
+
+	local_irq_save(flags);
+
+	/* basic device status */
+	t = scnprintf(next, size,
+		      DRIVER_DESC "\n"
+		      "%s version: %s\n"
+		      "Gadget driver: %s\n"
+		      "Host: %s\n\n",
+		      driver_name, DRIVER_VERSION,
+		      dev->driver ? dev->driver->driver.name : "(none)",
+		      is_usb_connected()? "full speed" : "disconnected");
+	size -= t;
+	next += t;
+
+	t = scnprintf(next, size,
+		      "GPIO:\n"
+		      " Port C bit 1: %d, dir %d\n"
+		      " Port C bit 2: %d, dir %d\n\n",
+		      get_portc_pdr(1), get_portc_ddr(1),
+		      get_portc_pdr(2), get_portc_ddr(2)
+	    );
+	size -= t;
+	next += t;
+
+	t = scnprintf(next, size,
+		      "DCP pullup: %d\n\n",
+		      (usb_read(USB_PM) & PM_USB_DCP) != 0);
+	size -= t;
+	next += t;
+
+	local_irq_restore(flags);
+	*eof = 1;
+	return count - size;
+}
+
+#define create_proc_files() 	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
+#define remove_proc_files() 	remove_proc_entry(proc_node_name, NULL)
+
+#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */
+
+#define create_proc_files() do {} while (0)
+#define remove_proc_files() do {} while (0)
+
+#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*
+ * 	udc_disable - disable USB device controller
+ */
+static void udc_disable(struct lh7a40x_udc *dev)
+{
+	DEBUG("%s, %p\n", __FUNCTION__, dev);
+
+	udc_set_address(dev, 0);
+
+	/* Disable interrupts */
+	usb_write(0, USB_IN_INT_EN);
+	usb_write(0, USB_OUT_INT_EN);
+	usb_write(0, USB_INT_EN);
+
+	/* Disable the USB */
+	usb_write(0, USB_PM);
+
+#ifdef CONFIG_ARCH_LH7A404
+	/* Disable USB power */
+	set_portc_dr(1, 0);
+#endif
+
+	/* if hardware supports it, disconnect from usb */
+	/* make_usb_disappear(); */
+
+	dev->ep0state = WAIT_FOR_SETUP;
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	dev->usb_address = 0;
+}
+
+/*
+ * 	udc_reinit - initialize software state
+ */
+static void udc_reinit(struct lh7a40x_udc *dev)
+{
+	u32 i;
+
+	DEBUG("%s, %p\n", __FUNCTION__, dev);
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD(&dev->gadget.ep_list);
+	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+	dev->ep0state = WAIT_FOR_SETUP;
+
+	/* basic endpoint records init */
+	for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
+		struct lh7a40x_ep *ep = &dev->ep[i];
+
+		if (i != 0)
+			list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+		ep->desc = 0;
+		ep->stopped = 0;
+		INIT_LIST_HEAD(&ep->queue);
+		ep->pio_irqs = 0;
+	}
+
+	/* the rest was statically initialized, and is read-only */
+}
+
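+/* The IN/OUT MAXP registers are programmed in units of 8 bytes,
+ * so convert between byte counts and register values.
+ */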
+#define BYTES2MAXP(x)	(x / 8)
+#define MAXP2BYTES(x)	(x * 8)
+
+/* until it's enabled, this UDC should be completely invisible
+ * to any USB host.
+ */
+static void udc_enable(struct lh7a40x_udc *dev)
+{
+	int ep;
+
+	DEBUG("%s, %p\n", __FUNCTION__, dev);
+
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+#ifdef CONFIG_ARCH_LH7A404
+	/* Set Port C bit 1 & 2 as output */
+	set_portc_ddr(1, 1);
+	set_portc_ddr(2, 1);
+
+	/* Enable USB power */
+	set_portc_dr(1, 0);
+#endif
+
+	/*
+	 * C.f Chapter 18.1.3.1 Initializing the USB
+	 */
+
+	/* Disable the USB */
+	usb_clear(PM_USB_ENABLE, USB_PM);
+
+	/* Reset APB & I/O sides of the USB */
+	usb_set(USB_RESET_APB | USB_RESET_IO, USB_RESET);
+	mdelay(5);
+	usb_clear(USB_RESET_APB | USB_RESET_IO, USB_RESET);
+
+	/* Set MAXP values for each */
+	for (ep = 0; ep < UDC_MAX_ENDPOINTS; ep++) {
+		struct lh7a40x_ep *ep_reg = &dev->ep[ep];
+		u32 csr;
+
+		usb_set_index(ep);
+
+		switch (ep_reg->ep_type) {
+		case ep_bulk_in:
+		case ep_interrupt:
+			usb_clear(USB_IN_CSR2_USB_DMA_EN | USB_IN_CSR2_AUTO_SET,
+				  ep_reg->csr2);
+			/* Fall through */
+		case ep_control:
+			usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)),
+				  USB_IN_MAXP);
+			break;
+		case ep_bulk_out:
+			usb_clear(USB_OUT_CSR2_USB_DMA_EN |
+				  USB_OUT_CSR2_AUTO_CLR, ep_reg->csr2);
+			usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)),
+				  USB_OUT_MAXP);
+			break;
+		}
+
+		/* Read & Write CSR1, just in case */
+		csr = usb_read(ep_reg->csr1);
+		usb_write(csr, ep_reg->csr1);
+
+		flush(ep_reg);
+	}
+
+	/* Disable interrupts */
+	usb_write(0, USB_IN_INT_EN);
+	usb_write(0, USB_OUT_INT_EN);
+	usb_write(0, USB_INT_EN);
+
+	/* Enable interrupts */
+	usb_set(USB_IN_INT_EP0, USB_IN_INT_EN);
+	usb_set(USB_INT_RESET_INT | USB_INT_RESUME_INT, USB_INT_EN);
+	/* Don't enable the rest of the interrupts */
+	/* usb_set(USB_IN_INT_EP3 | USB_IN_INT_EP1 | USB_IN_INT_EP0, USB_IN_INT_EN);
+	   usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN); */
+
+	/* Enable SUSPEND */
+	usb_set(PM_ENABLE_SUSPEND, USB_PM);
+
+	/* Enable the USB */
+	usb_set(PM_USB_ENABLE, USB_PM);
+
+#ifdef CONFIG_ARCH_LH7A404
+	/* NOTE: DOES NOT WORK! */
+	/* Let host detect UDC:
+	 * Software must write a 0 to the PMR:DCP_CTRL bit to turn this
+	 * transistor on and pull the USBDP pin HIGH.
+	 */
+	/* usb_clear(PM_USB_DCP, USB_PM);
+	   usb_set(PM_USB_DCP, USB_PM); */
+#endif
+}
+
+/*
+  Register entry point for the peripheral controller driver.
+*/
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct lh7a40x_udc *dev = the_controller;
+	int retval;
+
+	DEBUG("%s: %s\n", __FUNCTION__, driver->driver.name);
+
+	if (!driver
+	    || driver->speed != USB_SPEED_FULL
+	    || !driver->bind
+	    || !driver->unbind || !driver->disconnect || !driver->setup)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	device_add(&dev->gadget.dev);
+	retval = driver->bind(&dev->gadget);
+	if (retval) {
+		printk("%s: bind to driver %s --> error %d\n", dev->gadget.name,
+		       driver->driver.name, retval);
+		device_del(&dev->gadget.dev);
+
+		dev->driver = 0;
+		dev->gadget.dev.driver = 0;
+		return retval;
+	}
+
+	/* ... then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 * NOTE:  this shouldn't power up until later.
+	 */
+	printk("%s: registered gadget driver '%s'\n", dev->gadget.name,
+	       driver->driver.name);
+
+	udc_enable(dev);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+/*
+  Unregister entry point for the peripheral controller driver.
+*/
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct lh7a40x_udc *dev = the_controller;
+	unsigned long flags;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->driver = 0;
+	stop_activity(dev, driver);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	driver->unbind(&dev->gadget);
+	device_del(&dev->gadget.dev);
+
+	udc_disable(dev);
+
+	DEBUG("unregistered gadget driver '%s'\n", driver->driver.name);
+	return 0;
+}
+
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/*-------------------------------------------------------------------------*/
+
+/** Write request to FIFO (max write == maxp size)
+ *  Return:  0 = still running, 1 = completed, negative = errno
+ *  NOTE: INDEX register must be set for EP
+ */
+static int write_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
+{
+	u32 max;
+	u32 csr;
+
+	max = le16_to_cpu(ep->desc->wMaxPacketSize);
+
+	csr = usb_read(ep->csr1);
+	DEBUG("CSR: %x %d\n", csr, csr & USB_IN_CSR1_FIFO_NOT_EMPTY);
+
+	if (!(csr & USB_IN_CSR1_FIFO_NOT_EMPTY)) {
+		unsigned count;
+		int is_last, is_short;
+
+		count = write_packet(ep, req, max);
+		usb_set(USB_IN_CSR1_IN_PKT_RDY, ep->csr1);
+
+		/* last packet is usually short (or a zlp) */
+		if (unlikely(count != max))
+			is_last = is_short = 1;
+		else {
+			if (likely(req->req.length != req->req.actual)
+			    || req->req.zero)
+				is_last = 0;
+			else
+				is_last = 1;
+			/* interrupt/iso maxpacket may not fill the fifo */
+			is_short = unlikely(max < ep_maxpacket(ep));
+		}
+
+		DEBUG("%s: wrote %s %d bytes%s%s %d left %p\n", __FUNCTION__,
+		      ep->ep.name, count,
+		      is_last ? "/L" : "", is_short ? "/S" : "",
+		      req->req.length - req->req.actual, req);
+
+		/* requests complete when all IN data is in the FIFO */
+		if (is_last) {
+			done(ep, req, 0);
+			if (list_empty(&ep->queue)) {
+				pio_irq_disable(ep_index(ep));
+			}
+			return 1;
+		}
+	} else {
+		DEBUG("Hmm.. %d ep FIFO is not empty!\n", ep_index(ep));
+	}
+
+	return 0;
+}
+
+/** Read from FIFO into the request buffer (max read == bytes in fifo)
+ *  Return:  0 = still running, 1 = completed, negative = errno
+ *  NOTE: INDEX register must be set for EP
+ */
+static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
+{
+	u32 csr;
+	u8 *buf;
+	unsigned bufferspace, count, is_short;
+	volatile u32 *fifo = (volatile u32 *)ep->fifo;
+
+	/* make sure there's a packet in the FIFO. */
+	csr = usb_read(ep->csr1);
+	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
+		DEBUG("%s: Packet NOT ready!\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+	bufferspace = req->req.length - req->req.actual;
+
+	/* read all bytes from this packet */
+	count = usb_read(USB_OUT_FIFO_WC1);
+	req->req.actual += min(count, bufferspace);
+
+	is_short = (count < ep->ep.maxpacket);
+	DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
+	      ep->ep.name, csr, count,
+	      is_short ? "/S" : "", req, req->req.actual, req->req.length);
+
+	while (likely(count-- != 0)) {
+		u8 byte = (u8) (*fifo & 0xff);
+
+		if (unlikely(bufferspace == 0)) {
+			/* this happens when the driver's buffer
+			 * is smaller than what the host sent.
+			 * discard the extra data.
+			 */
+			if (req->req.status != -EOVERFLOW)
+				printk("%s overflow %d\n", ep->ep.name, count);
+			req->req.status = -EOVERFLOW;
+		} else {
+			*buf++ = byte;
+			bufferspace--;
+		}
+	}
+
+	usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);
+
+	/* completion */
+	if (is_short || req->req.actual == req->req.length) {
+		done(ep, req, 0);
+		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
+
+		if (list_empty(&ep->queue))
+			pio_irq_disable(ep_index(ep));
+		return 1;
+	}
+
+	/* finished that packet.  the next one may be waiting... */
+	return 0;
+}
+
+/*
+ *	done - retire a request; caller blocked irqs
+ *  INDEX register is saved across the completion callback and restored afterwards
+ */
+static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req, int status)
+{
+	unsigned int stopped = ep->stopped;
+	u32 index;
+
+	DEBUG("%s, %p\n", __FUNCTION__, ep);
+	list_del_init(&req->queue);
+
+	if (likely(req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (status && status != -ESHUTDOWN)
+		DEBUG("complete %s req %p stat %d len %u/%u\n",
+		      ep->ep.name, &req->req, status,
+		      req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	/* Read current index (completion may modify it) */
+	index = usb_read(USB_INDEX);
+
+	spin_unlock(&ep->dev->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&ep->dev->lock);
+
+	/* Restore index */
+	usb_set_index(index);
+	ep->stopped = stopped;
+}
+
+/** Enable EP interrupt */
+static void pio_irq_enable(int ep)
+{
+	DEBUG("%s: %d\n", __FUNCTION__, ep);
+
+	switch (ep) {
+	case 1:
+		usb_set(USB_IN_INT_EP1, USB_IN_INT_EN);
+		break;
+	case 2:
+		usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN);
+		break;
+	case 3:
+		usb_set(USB_IN_INT_EP3, USB_IN_INT_EN);
+		break;
+	default:
+		DEBUG("Unknown endpoint: %d\n", ep);
+		break;
+	}
+}
+
+/** Disable EP interrupt */
+static void pio_irq_disable(int ep)
+{
+	DEBUG("%s: %d\n", __FUNCTION__, ep);
+
+	switch (ep) {
+	case 1:
+		usb_clear(USB_IN_INT_EP1, USB_IN_INT_EN);
+		break;
+	case 2:
+		usb_clear(USB_OUT_INT_EP2, USB_OUT_INT_EN);
+		break;
+	case 3:
+		usb_clear(USB_IN_INT_EP3, USB_IN_INT_EN);
+		break;
+	default:
+		DEBUG("Unknown endpoint: %d\n", ep);
+		break;
+	}
+}
+
+/*
+ * 	nuke - dequeue ALL requests
+ */
+void nuke(struct lh7a40x_ep *ep, int status)
+{
+	struct lh7a40x_request *req;
+
+	DEBUG("%s, %p\n", __FUNCTION__, ep);
+
+	/* Flush FIFO */
+	flush(ep);
+
+	/* called with irqs blocked */
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
+		done(ep, req, status);
+	}
+
+	/* Disable IRQ if EP is enabled (has descriptor) */
+	if (ep->desc)
+		pio_irq_disable(ep_index(ep));
+}
+
+/*
+void nuke_all(struct lh7a40x_udc *dev)
+{
+	int n;
+	for(n=0; n<UDC_MAX_ENDPOINTS; n++) {
+		struct lh7a40x_ep *ep = &dev->ep[n];
+		usb_set_index(n);
+		nuke(ep, 0);
+	}
+}*/
+
+/*
+static void flush_all(struct lh7a40x_udc *dev)
+{
+	int n;
+    for (n = 0; n < UDC_MAX_ENDPOINTS; n++)
+    {
+		struct lh7a40x_ep *ep = &dev->ep[n];
+		flush(ep);
+    }
+}
+*/
+
+/** Flush EP
+ * NOTE: INDEX register must be set before this call
+ */
+static void flush(struct lh7a40x_ep *ep)
+{
+	DEBUG("%s, %p\n", __FUNCTION__, ep);
+
+	switch (ep->ep_type) {
+	case ep_control:
+		/* check, by implication c.f. 15.1.2.11 */
+		break;
+
+	case ep_bulk_in:
+	case ep_interrupt:
+		/* if(csr & USB_IN_CSR1_IN_PKT_RDY) */
+		usb_set(USB_IN_CSR1_FIFO_FLUSH, ep->csr1);
+		break;
+
+	case ep_bulk_out:
+		/* if(csr & USB_OUT_CSR1_OUT_PKT_RDY) */
+		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
+		break;
+	}
+}
+
+/**
+ * lh7a40x_in_epn - handle IN interrupt
+ */
+static void lh7a40x_in_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
+{
+	u32 csr;
+	struct lh7a40x_ep *ep = &dev->ep[ep_idx];
+	struct lh7a40x_request *req;
+
+	usb_set_index(ep_idx);
+
+	csr = usb_read(ep->csr1);
+	DEBUG("%s: %d, csr %x\n", __FUNCTION__, ep_idx, csr);
+
+	if (csr & USB_IN_CSR1_SENT_STALL) {
+		DEBUG("USB_IN_CSR1_SENT_STALL\n");
+		usb_set(USB_IN_CSR1_SENT_STALL /*|USB_IN_CSR1_SEND_STALL */ ,
+			ep->csr1);
+		return;
+	}
+
+	if (!ep->desc) {
+		DEBUG("%s: NO EP DESC\n", __FUNCTION__);
+		return;
+	}
+
+	if (list_empty(&ep->queue))
+		req = 0;
+	else
+		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
+
+	DEBUG("req: %p\n", req);
+
+	if (!req)
+		return;
+
+	write_fifo(ep, req);
+}
+
+/* ********************************************************************************************* */
+/* Bulk OUT (recv)
+ */
+
+static void lh7a40x_out_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
+{
+	struct lh7a40x_ep *ep = &dev->ep[ep_idx];
+	struct lh7a40x_request *req;
+
+	DEBUG("%s: %d\n", __FUNCTION__, ep_idx);
+
+	usb_set_index(ep_idx);
+
+	if (ep->desc) {
+		u32 csr;
+		csr = usb_read(ep->csr1);
+
+		while ((csr = usb_read(ep->csr1)) &
+		       (USB_OUT_CSR1_OUT_PKT_RDY | USB_OUT_CSR1_SENT_STALL)) {
+			DEBUG("%s: %x\n", __FUNCTION__, csr);
+
+			if (csr & USB_OUT_CSR1_SENT_STALL) {
+				DEBUG("%s: stall sent, flush fifo\n",
+				      __FUNCTION__);
+				/* usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1); */
+				flush(ep);
+			} else if (csr & USB_OUT_CSR1_OUT_PKT_RDY) {
+				if (list_empty(&ep->queue))
+					req = 0;
+				else
+					req =
+					    list_entry(ep->queue.next,
+						       struct lh7a40x_request,
+						       queue);
+
+				if (!req) {
+					printk("%s: NULL REQ %d\n",
+					       __FUNCTION__, ep_idx);
+					flush(ep);
+					break;
+				} else {
+					read_fifo(ep, req);
+				}
+			}
+
+		}
+
+	} else {
+		/* Throw packet away.. */
+		printk("%s: No descriptor?!?\n", __FUNCTION__);
+		flush(ep);
+	}
+}
+
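+/*
+ * 	stop_activity - quiesce all endpoints and report disconnect
+ *
+ * Called with dev->lock held and irqs blocked; the lock is dropped
+ * only around the gadget driver's disconnect() callback.
+ */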
+static void stop_activity(struct lh7a40x_udc *dev,
+			  struct usb_gadget_driver *driver)
+{
+	int i;
+
+	/* don't disconnect drivers more than once */
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = 0;
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
+		struct lh7a40x_ep *ep = &dev->ep[i];
+		ep->stopped = 1;
+
+		usb_set_index(i);
+		nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
+	/* re-init driver-visible data structures */
+	udc_reinit(dev);
+}
+
+/** Handle USB RESET interrupt
+ */
+static void lh7a40x_reset_intr(struct lh7a40x_udc *dev)
+{
+#if 0				/* def CONFIG_ARCH_LH7A404 */
+	/* Does not work always... */
+
+	DEBUG("%s: %d\n", __FUNCTION__, dev->usb_address);
+
+	if (!dev->usb_address) {
+		/*usb_set(USB_RESET_IO, USB_RESET);
+		   mdelay(5);
+		   usb_clear(USB_RESET_IO, USB_RESET); */
+		return;
+	}
+	/* Put the USB controller into reset. */
+	usb_set(USB_RESET_IO, USB_RESET);
+
+	/* Set Device ID to 0 */
+	udc_set_address(dev, 0);
+
+	/* Let PLL2 settle down */
+	mdelay(5);
+
+	/* Release the USB controller from reset */
+	usb_clear(USB_RESET_IO, USB_RESET);
+
+	/* Re-enable UDC */
+	udc_enable(dev);
+
+#endif
+	dev->gadget.speed = USB_SPEED_FULL;
+}
+
+/*
+ *	lh7a40x usb client interrupt handler.
+ */
+static irqreturn_t lh7a40x_udc_irq(int irq, void *_dev, struct pt_regs *r)
+{
+	struct lh7a40x_udc *dev = _dev;
+
+	DEBUG("\n\n");
+
+	spin_lock(&dev->lock);
+
+	for (;;) {
+		u32 intr_in = usb_read(USB_IN_INT);
+		u32 intr_out = usb_read(USB_OUT_INT);
+		u32 intr_int = usb_read(USB_INT);
+
+		/* Test also against enable bits.. (lh7a40x errata).. Sigh.. */
+		u32 in_en = usb_read(USB_IN_INT_EN);
+		u32 out_en = usb_read(USB_OUT_INT_EN);
+
+		if (!intr_out && !intr_in && !intr_int)
+			break;
+
+		DEBUG("%s (on state %s)\n", __FUNCTION__,
+		      state_names[dev->ep0state]);
+		DEBUG("intr_out = %x\n", intr_out);
+		DEBUG("intr_in  = %x\n", intr_in);
+		DEBUG("intr_int = %x\n", intr_int);
+
+		if (intr_in) {
+			usb_write(intr_in, USB_IN_INT);
+
+			if ((intr_in & USB_IN_INT_EP1)
+			    && (in_en & USB_IN_INT_EP1)) {
+				DEBUG("USB_IN_INT_EP1\n");
+				lh7a40x_in_epn(dev, 1, intr_in);
+			}
+			if ((intr_in & USB_IN_INT_EP3)
+			    && (in_en & USB_IN_INT_EP3)) {
+				DEBUG("USB_IN_INT_EP3\n");
+				lh7a40x_in_epn(dev, 3, intr_in);
+			}
+			if (intr_in & USB_IN_INT_EP0) {
+				DEBUG("USB_IN_INT_EP0 (control)\n");
+				lh7a40x_handle_ep0(dev, intr_in);
+			}
+		}
+
+		if (intr_out) {
+			usb_write(intr_out, USB_OUT_INT);
+
+			if ((intr_out & USB_OUT_INT_EP2)
+			    && (out_en & USB_OUT_INT_EP2)) {
+				DEBUG("USB_OUT_INT_EP2\n");
+				lh7a40x_out_epn(dev, 2, intr_out);
+			}
+		}
+
+		if (intr_int) {
+			usb_write(intr_int, USB_INT);
+
+			if (intr_int & USB_INT_RESET_INT) {
+				lh7a40x_reset_intr(dev);
+			}
+
+			if (intr_int & USB_INT_RESUME_INT) {
+				DEBUG("USB resume\n");
+
+				if (dev->gadget.speed != USB_SPEED_UNKNOWN
+				    && dev->driver
+				    && dev->driver->resume
+				    && is_usb_connected()) {
+					dev->driver->resume(&dev->gadget);
+				}
+			}
+
+			if (intr_int & USB_INT_SUSPEND_INT) {
+				DEBUG("USB suspend%s\n",
+				      is_usb_connected()? "" : "+disconnect");
+				if (!is_usb_connected()) {
+					stop_activity(dev, dev->driver);
+				} else if (dev->gadget.speed !=
+					   USB_SPEED_UNKNOWN && dev->driver
+					   && dev->driver->suspend) {
+					dev->driver->suspend(&dev->gadget);
+				}
+			}
+
+		}
+	}
+
+	spin_unlock(&dev->lock);
+
+	return IRQ_HANDLED;
+}
+
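+/** Enable EP
+ *  Validates the descriptor against the fixed hardware endpoint
+ *  (address, transfer type, maxpacket) before accepting it.
+ */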
+static int lh7a40x_ep_enable(struct usb_ep *_ep,
+			     const struct usb_endpoint_descriptor *desc)
+{
+	struct lh7a40x_ep *ep;
+	struct lh7a40x_udc *dev;
+	unsigned long flags;
+
+	DEBUG("%s, %p\n", __FUNCTION__, _ep);
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+	    || desc->bDescriptorType != USB_DT_ENDPOINT
+	    || ep->bEndpointAddress != desc->bEndpointAddress
+	    || ep_maxpacket(ep) < le16_to_cpu(desc->wMaxPacketSize)) {
+		DEBUG("%s, bad ep or descriptor\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* xfer types must match, except that interrupt ~= bulk */
+	if (ep->bmAttributes != desc->bmAttributes
+	    && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+	    && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+		DEBUG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
+		return -EINVAL;
+	}
+
+	/* hardware _could_ do smaller, but driver doesn't */
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+	     && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(ep))
+	    || !desc->wMaxPacketSize) {
+		DEBUG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
+		return -ERANGE;
+	}
+
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
+		DEBUG("%s, bogus device state\n", __FUNCTION__);
+		return -ESHUTDOWN;
+	}
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+
+	ep->stopped = 0;
+	ep->desc = desc;
+	ep->pio_irqs = 0;
+	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+
+	/* Reset halt state (does flush) */
+	lh7a40x_set_halt(_ep, 0);
+
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+	DEBUG("%s: enabled %s\n", __FUNCTION__, _ep->name);
+	return 0;
+}
+
+/** Disable EP
+ *  NOTE: Sets INDEX register
+ */
+static int lh7a40x_ep_disable(struct usb_ep *_ep)
+{
+	struct lh7a40x_ep *ep;
+	unsigned long flags;
+
+	DEBUG("%s, %p\n", __FUNCTION__, _ep);
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (!_ep || !ep->desc) {
+		DEBUG("%s, %s not enabled\n", __FUNCTION__,
+		      _ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+
+	usb_set_index(ep_index(ep));
+
+	/* Nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	/* Disable ep IRQ */
+	pio_irq_disable(ep_index(ep));
+
+	ep->desc = 0;
+	ep->stopped = 1;
+
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+	DEBUG("%s: disabled %s\n", __FUNCTION__, _ep->name);
+	return 0;
+}
+
+static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
+						 int gfp_flags)
+{
+	struct lh7a40x_request *req;
+
+	DEBUG("%s, %p\n", __FUNCTION__, ep);
+
+	req = kmalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return 0;
+
+	memset(req, 0, sizeof *req);
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct lh7a40x_request *req;
+
+	DEBUG("%s, %p\n", __FUNCTION__, ep);
+
+	req = container_of(_req, struct lh7a40x_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes,
+				  dma_addr_t * dma, int gfp_flags)
+{
+	char *retval;
+
+	DEBUG("%s (%p, %d, %d)\n", __FUNCTION__, ep, bytes, gfp_flags);
+
+	retval = kmalloc(bytes, gfp_flags & ~(__GFP_DMA | __GFP_HIGHMEM));
+	if (retval)
+		*dma = virt_to_bus(retval);
+	return retval;
+}
+
+static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma,
+				unsigned bytes)
+{
+	DEBUG("%s, %p\n", __FUNCTION__, ep);
+	kfree(buf);
+}
+
+/** Queue one request
+ *  Kickstart transfer if needed
+ *  NOTE: Sets INDEX register
+ */
+static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
+			 int gfp_flags)
+{
+	struct lh7a40x_request *req;
+	struct lh7a40x_ep *ep;
+	struct lh7a40x_udc *dev;
+	unsigned long flags;
+
+	DEBUG("\n\n\n%s, %p\n", __FUNCTION__, _ep);
+
+	req = container_of(_req, struct lh7a40x_request, req);
+	if (unlikely
+	    (!_req || !_req->complete || !_req->buf
+	     || !list_empty(&req->queue))) {
+		DEBUG("%s, bad params\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		DEBUG("%s, bad ep\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	dev = ep->dev;
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+		DEBUG("%s, bogus device state %p\n", __FUNCTION__, dev->driver);
+		return -ESHUTDOWN;
+	}
+
+	DEBUG("%s queue req %p, len %d buf %p\n", _ep->name, _req, _req->length,
+	      _req->buf);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* kickstart this i/o queue? */
+	DEBUG("Add to %d Q %d %d\n", ep_index(ep), list_empty(&ep->queue),
+	      ep->stopped);
+	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
+		u32 csr;
+
+		if (unlikely(ep_index(ep) == 0)) {
+			/* EP0 */
+			list_add_tail(&req->queue, &ep->queue);
+			lh7a40x_ep0_kick(dev, ep);
+			req = 0;
+		} else if (ep_is_in(ep)) {
+			/* EP1 & EP3 */
+			usb_set_index(ep_index(ep));
+			csr = usb_read(ep->csr1);
+			pio_irq_enable(ep_index(ep));
+			if ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY) == 0) {
+				if (write_fifo(ep, req) == 1)
+					req = 0;
+			}
+		} else {
+			/* EP2 */
+			usb_set_index(ep_index(ep));
+			csr = usb_read(ep->csr1);
+			pio_irq_enable(ep_index(ep));
+			if (!(csr & USB_OUT_CSR1_FIFO_FULL)) {
+				if (read_fifo(ep, req) == 1)
+					req = 0;
+			}
+		}
+	}
+
+	/* pio or dma irq handler advances the queue. */
+	if (likely(req != 0))
+		list_add_tail(&req->queue, &ep->queue);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/* dequeue JUST ONE request */
+static int lh7a40x_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct lh7a40x_ep *ep;
+	struct lh7a40x_request *req;
+	unsigned long flags;
+
+	DEBUG("%s, %p\n", __FUNCTION__, _ep);
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (!_ep || ep->ep.name == ep0name)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&ep->dev->lock, flags);
+		return -EINVAL;
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return 0;
+}
+
+/** Halt specific EP
+ *  Return 0 on success
+ *  NOTE: Sets INDEX register to EP!
+ */
+static int lh7a40x_set_halt(struct usb_ep *_ep, int value)
+{
+	struct lh7a40x_ep *ep;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		DEBUG("%s, bad ep\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	usb_set_index(ep_index(ep));
+
+	DEBUG("%s, ep %d, val %d\n", __FUNCTION__, ep_index(ep), value);
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+
+	if (ep_index(ep) == 0) {
+		/* EP0 */
+		usb_set(EP0_SEND_STALL, ep->csr1);
+	} else if (ep_is_in(ep)) {
+		u32 csr = usb_read(ep->csr1);
+		if (value && ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY)
+			      || !list_empty(&ep->queue))) {
+			/*
+			 * Attempts to halt IN endpoints will fail (returning -EAGAIN)
+			 * if any transfer requests are still queued, or if the controller
+			 * FIFO still holds bytes that the host hasn't collected.
+			 */
+			spin_unlock_irqrestore(&ep->dev->lock, flags);
+			DEBUG
+			    ("Attempt to halt IN endpoint failed (returning -EAGAIN) %d %d\n",
+			     (csr & USB_IN_CSR1_FIFO_NOT_EMPTY),
+			     !list_empty(&ep->queue));
+			return -EAGAIN;
+		}
+		flush(ep);
+		if (value)
+			usb_set(USB_IN_CSR1_SEND_STALL, ep->csr1);
+		else {
+			usb_clear(USB_IN_CSR1_SEND_STALL, ep->csr1);
+			usb_set(USB_IN_CSR1_CLR_DATA_TOGGLE, ep->csr1);
+		}
+
+	} else {
+
+		flush(ep);
+		if (value)
+			usb_set(USB_OUT_CSR1_SEND_STALL, ep->csr1);
+		else {
+			usb_clear(USB_OUT_CSR1_SEND_STALL, ep->csr1);
+			usb_set(USB_OUT_CSR1_CLR_DATA_REG, ep->csr1);
+		}
+	}
+
+	if (value) {
+		ep->stopped = 1;
+	} else {
+		ep->stopped = 0;
+	}
+
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+	DEBUG("%s %s halted\n", _ep->name, value == 0 ? "NOT" : "IS");
+
+	return 0;
+}
+
+/** Return bytes in EP FIFO
+ *  NOTE: Sets INDEX register to EP
+ */
+static int lh7a40x_fifo_status(struct usb_ep *_ep)
+{
+	u32 csr;
+	int count = 0;
+	struct lh7a40x_ep *ep;
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (!_ep) {
+		DEBUG("%s, bad ep\n", __FUNCTION__);
+		return -ENODEV;
+	}
+
+	DEBUG("%s, %d\n", __FUNCTION__, ep_index(ep));
+
+	/* LPD can't report unclaimed bytes from IN fifos */
+	if (ep_is_in(ep))
+		return -EOPNOTSUPP;
+
+	usb_set_index(ep_index(ep));
+
+	csr = usb_read(ep->csr1);
+	if (ep->dev->gadget.speed != USB_SPEED_UNKNOWN ||
+	    csr & USB_OUT_CSR1_OUT_PKT_RDY) {
+		count = usb_read(USB_OUT_FIFO_WC1);
+	}
+
+	return count;
+}
+
+/** Flush EP FIFO
+ *  NOTE: Sets INDEX register to EP
+ */
+static void lh7a40x_fifo_flush(struct usb_ep *_ep)
+{
+	struct lh7a40x_ep *ep;
+
+	ep = container_of(_ep, struct lh7a40x_ep, ep);
+	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		DEBUG("%s, bad ep\n", __FUNCTION__);
+		return;
+	}
+
+	usb_set_index(ep_index(ep));
+	flush(ep);
+}
+
+/****************************************************************/
+/* End Point 0 related functions                                */
+/****************************************************************/
+
+/* return:  0 = still running, 1 = completed, negative = errno */
+static int write_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
+{
+	u32 max;
+	unsigned count;
+	int is_last;
+
+	max = ep_maxpacket(ep);
+
+	DEBUG_EP0("%s\n", __FUNCTION__);
+
+	count = write_packet(ep, req, max);
+
+	/* last packet is usually short (or a zlp) */
+	if (unlikely(count != max))
+		is_last = 1;
+	else {
+		if (likely(req->req.length != req->req.actual) || req->req.zero)
+			is_last = 0;
+		else
+			is_last = 1;
+	}
+
+	DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __FUNCTION__,
+		  ep->ep.name, count,
+		  is_last ? "/L" : "", req->req.length - req->req.actual, req);
+
+	/* requests complete when all IN data is in the FIFO */
+	if (is_last) {
+		done(ep, req, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
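+/* Raw EP0 FIFO helpers: data moves one byte at a time through a
+ * 32-bit FIFO register window.
+ */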
+static __inline__ int lh7a40x_fifo_read(struct lh7a40x_ep *ep,
+					unsigned char *cp, int max)
+{
+	int bytes;
+	int count = usb_read(USB_OUT_FIFO_WC1);
+	volatile u32 *fifo = (volatile u32 *)ep->fifo;
+
+	if (count > max)
+		count = max;
+	bytes = count;
+	while (count--)
+		*cp++ = *fifo & 0xFF;
+	return bytes;
+}
+
+static __inline__ void lh7a40x_fifo_write(struct lh7a40x_ep *ep,
+					  unsigned char *cp, int count)
+{
+	volatile u32 *fifo = (volatile u32 *)ep->fifo;
+	DEBUG_EP0("fifo_write: %d %d\n", ep_index(ep), count);
+	while (count--)
+		*fifo = *cp++;
+}
+
+static int read_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
+{
+	u32 csr;
+	u8 *buf;
+	unsigned bufferspace, count, is_short;
+	volatile u32 *fifo = (volatile u32 *)ep->fifo;
+
+	DEBUG_EP0("%s\n", __FUNCTION__);
+
+	csr = usb_read(USB_EP0_CSR);
+	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY))
+		return 0;
+
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+	bufferspace = req->req.length - req->req.actual;
+
+	/* read all bytes from this packet */
+	if (likely(csr & EP0_OUT_PKT_RDY)) {
+		count = usb_read(USB_OUT_FIFO_WC1);
+		req->req.actual += min(count, bufferspace);
+	} else			/* zlp */
+		count = 0;
+
+	is_short = (count < ep->ep.maxpacket);
+	DEBUG_EP0("read %s %02x, %d bytes%s req %p %d/%d\n",
+		  ep->ep.name, csr, count,
+		  is_short ? "/S" : "", req, req->req.actual, req->req.length);
+
+	while (likely(count-- != 0)) {
+		u8 byte = (u8) (*fifo & 0xff);
+
+		if (unlikely(bufferspace == 0)) {
+			/* this happens when the driver's buffer
+			 * is smaller than what the host sent.
+			 * discard the extra data.
+			 */
+			if (req->req.status != -EOVERFLOW)
+				DEBUG_EP0("%s overflow %d\n", ep->ep.name,
+					  count);
+			req->req.status = -EOVERFLOW;
+		} else {
+			*buf++ = byte;
+			bufferspace--;
+		}
+	}
+
+	/* completion */
+	if (is_short || req->req.actual == req->req.length) {
+		done(ep, req, 0);
+		return 1;
+	}
+
+	/* finished that packet.  the next one may be waiting... */
+	return 0;
+}
+
+/**
+ * udc_set_address - set the USB address for this device
+ * @dev: the device controller
+ * @address: the 7-bit device address assigned by the host
+ *
+ * Called from control endpoint function after it decodes a set address setup packet.
+ */
+static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address)
+{
+	DEBUG_EP0("%s: %d\n", __FUNCTION__, address);
+	/* c.f. 15.1.2.2 Table 15-4 address will be used after DATA_END is set */
+	dev->usb_address = address;
+	usb_set((address & USB_FA_FUNCTION_ADDR), USB_FA);
+	usb_set(USB_FA_ADDR_UPDATE | (address & USB_FA_FUNCTION_ADDR), USB_FA);
+	/* usb_read(USB_FA); */
+}
+
+/*
+ * DATA_STATE_RECV (OUT_PKT_RDY)
+ *      - if error
+ *              set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
+ *      - else
+ *              set EP0_CLR_OUT bit
+ *              if last, also set EP0_DATA_END bit
+ */
+static void lh7a40x_ep0_out(struct lh7a40x_udc *dev, u32 csr)
+{
+	struct lh7a40x_request *req;
+	struct lh7a40x_ep *ep = &dev->ep[0];
+	int ret;
+
+	DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
+
+	if (list_empty(&ep->queue))
+		req = 0;
+	else
+		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
+
+	if (req) {
+
+		if (req->req.length == 0) {
+			DEBUG_EP0("ZERO LENGTH OUT!\n");
+			usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
+			dev->ep0state = WAIT_FOR_SETUP;
+			return;
+		}
+		ret = read_fifo_ep0(ep, req);
+		if (ret) {
+			/* Done! */
+			DEBUG_EP0("%s: finished, waiting for status\n",
+				  __FUNCTION__);
+
+			usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
+			dev->ep0state = WAIT_FOR_SETUP;
+		} else {
+			/* Not done yet.. */
+			DEBUG_EP0("%s: not finished\n", __FUNCTION__);
+			usb_set(EP0_CLR_OUT, USB_EP0_CSR);
+		}
+	} else {
+		DEBUG_EP0("NO REQ??!\n");
+	}
+}
+
+/*
+ * DATA_STATE_XMIT
+ */
+static int lh7a40x_ep0_in(struct lh7a40x_udc *dev, u32 csr)
+{
+	struct lh7a40x_request *req;
+	struct lh7a40x_ep *ep = &dev->ep[0];
+	int ret, need_zlp = 0;
+
+	DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
+
+	if (list_empty(&ep->queue))
+		req = 0;
+	else
+		req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
+
+	if (!req) {
+		DEBUG_EP0("%s: NULL REQ\n", __FUNCTION__);
+		return 0;
+	}
+
+	if (req->req.length == 0) {
+
+		usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
+		dev->ep0state = WAIT_FOR_SETUP;
+		return 1;
+	}
+
+	if (req->req.length - req->req.actual == EP0_PACKETSIZE) {
+		/* Next write will end with the packet size, */
+		/* so we need Zero-length-packet */
+		need_zlp = 1;
+	}
+
+	ret = write_fifo_ep0(ep, req);
+
+	if (ret == 1 && !need_zlp) {
+		/* Last packet */
+		DEBUG_EP0("%s: finished, waiting for status\n", __FUNCTION__);
+
+		usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
+		dev->ep0state = WAIT_FOR_SETUP;
+	} else {
+		DEBUG_EP0("%s: not finished\n", __FUNCTION__);
+		usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
+	}
+
+	if (need_zlp) {
+		DEBUG_EP0("%s: Need ZLP!\n", __FUNCTION__);
+		usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
+		dev->ep0state = DATA_STATE_NEED_ZLP;
+	}
+
+	return 1;
+}
+
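+/*
+ * Answer GET_STATUS requests directly from the UDC: report the device
+ * as self-powered, or report the halt state of the addressed endpoint,
+ * without calling into the gadget driver.
+ */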
+static int lh7a40x_handle_get_status(struct lh7a40x_udc *dev,
+				     struct usb_ctrlrequest *ctrl)
+{
+	struct lh7a40x_ep *ep0 = &dev->ep[0];
+	struct lh7a40x_ep *qep;
+	int reqtype = (ctrl->bRequestType & USB_RECIP_MASK);
+	u16 val = 0;
+
+	if (reqtype == USB_RECIP_INTERFACE) {
+		/* This is not supported.
+		 * According to the USB spec, interface status is all zero
+		 * anyway, so just return 0.
+		 */
+		DEBUG_SETUP("GET_STATUS: USB_RECIP_INTERFACE\n");
+	} else if (reqtype == USB_RECIP_DEVICE) {
+		DEBUG_SETUP("GET_STATUS: USB_RECIP_DEVICE\n");
+		val |= (1 << 0);	/* Self powered */
+		/*val |= (1<<1); *//* Remote wakeup */
+	} else if (reqtype == USB_RECIP_ENDPOINT) {
+		int ep_num = (ctrl->wIndex & ~USB_DIR_IN);
+
+		DEBUG_SETUP
+		    ("GET_STATUS: USB_RECIP_ENDPOINT (%d), ctrl->wLength = %d\n",
+		     ep_num, ctrl->wLength);
+
+		if (ctrl->wLength > 2 || ep_num > 3)
+			return -EOPNOTSUPP;
+
+		qep = &dev->ep[ep_num];
+		if (ep_is_in(qep) != ((ctrl->wIndex & USB_DIR_IN) ? 1 : 0)
+		    && ep_index(qep) != 0) {
+			return -EOPNOTSUPP;
+		}
+
+		usb_set_index(ep_index(qep));
+
+		/* Return status on next IN token */
+		switch (qep->ep_type) {
+		case ep_control:
+			val =
+			    (usb_read(qep->csr1) & EP0_SEND_STALL) ==
+			    EP0_SEND_STALL;
+			break;
+		case ep_bulk_in:
+		case ep_interrupt:
+			val =
+			    (usb_read(qep->csr1) & USB_IN_CSR1_SEND_STALL) ==
+			    USB_IN_CSR1_SEND_STALL;
+			break;
+		case ep_bulk_out:
+			val =
+			    (usb_read(qep->csr1) & USB_OUT_CSR1_SEND_STALL) ==
+			    USB_OUT_CSR1_SEND_STALL;
+			break;
+		}
+
+		/* Back to EP0 index */
+		usb_set_index(0);
+
+		DEBUG_SETUP("GET_STATUS, ep: %d (%x), val = %d\n", ep_num,
+			    ctrl->wIndex, val);
+	} else {
+		DEBUG_SETUP("Unknown REQ TYPE: %d\n", reqtype);
+		return -EOPNOTSUPP;
+	}
+
+	/* Clear "out packet ready" */
+	usb_set((EP0_CLR_OUT), USB_EP0_CSR);
+	/* Put status to FIFO */
+	lh7a40x_fifo_write(ep0, (u8 *) & val, sizeof(val));
+	/* Issue "In packet ready" */
+	usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
+
+	return 0;
+}
+
+/*
+ * WAIT_FOR_SETUP (OUT_PKT_RDY)
+ *      - read data packet from EP0 FIFO
+ *      - decode command
+ *      - if error
+ *              set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
+ *      - else
+ *              set EP0_CLR_OUT | EP0_DATA_END bits
+ */
+static void lh7a40x_ep0_setup(struct lh7a40x_udc *dev, u32 csr)
+{
+	struct lh7a40x_ep *ep = &dev->ep[0];
+	struct usb_ctrlrequest ctrl;
+	int i, bytes, is_in;
+
+	DEBUG_SETUP("%s: %x\n", __FUNCTION__, csr);
+
+	/* Nuke all previous transfers */
+	nuke(ep, -EPROTO);
+
+	/* read control req from fifo (8 bytes) */
+	bytes = lh7a40x_fifo_read(ep, (unsigned char *)&ctrl, 8);
+
+	DEBUG_SETUP("Read CTRL REQ %d bytes\n", bytes);
+	DEBUG_SETUP("CTRL.bRequestType = %d (is_in %d)\n", ctrl.bRequestType,
+		    ctrl.bRequestType == USB_DIR_IN);
+	DEBUG_SETUP("CTRL.bRequest = %d\n", ctrl.bRequest);
+	DEBUG_SETUP("CTRL.wLength = %d\n", ctrl.wLength);
+	DEBUG_SETUP("CTRL.wValue = %d (%d)\n", ctrl.wValue, ctrl.wValue >> 8);
+	DEBUG_SETUP("CTRL.wIndex = %d\n", ctrl.wIndex);
+
+	/* Set direction of EP0 */
+	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
+		ep->bEndpointAddress |= USB_DIR_IN;
+		is_in = 1;
+	} else {
+		ep->bEndpointAddress &= ~USB_DIR_IN;
+		is_in = 0;
+	}
+
+	dev->req_pending = 1;
+
+	/* Handle some SETUP packets ourselves */
+	switch (ctrl.bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
+			break;
+
+		DEBUG_SETUP("USB_REQ_SET_ADDRESS (%d)\n", ctrl.wValue);
+		udc_set_address(dev, ctrl.wValue);
+		usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
+		return;
+
+	case USB_REQ_GET_STATUS:{
+			if (lh7a40x_handle_get_status(dev, &ctrl) == 0)
+				return;
+
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+			if (ctrl.bRequestType == USB_RECIP_ENDPOINT) {
+				struct lh7a40x_ep *qep;
+				int ep_num = (ctrl.wIndex & 0x0f);
+
+				/* Support only HALT feature */
+				if (ctrl.wValue != 0 || ctrl.wLength != 0
+				    || ep_num > 3 || ep_num < 1)
+					break;
+
+				qep = &dev->ep[ep_num];
+				if (ctrl.bRequest == USB_REQ_SET_FEATURE) {
+					DEBUG_SETUP("SET_FEATURE (%d)\n",
+						    ep_num);
+					lh7a40x_set_halt(&qep->ep, 1);
+				} else {
+					DEBUG_SETUP("CLR_FEATURE (%d)\n",
+						    ep_num);
+					lh7a40x_set_halt(&qep->ep, 0);
+				}
+				usb_set_index(0);
+
+				/* Reply with a ZLP on next IN token */
+				usb_set((EP0_CLR_OUT | EP0_DATA_END),
+					USB_EP0_CSR);
+				return;
+			}
+			break;
+		}
+
+	default:
+		break;
+	}
+
+	if (likely(dev->driver)) {
+		/* device-2-host (IN) or no data setup command, process immediately */
+		spin_unlock(&dev->lock);
+		i = dev->driver->setup(&dev->gadget, &ctrl);
+		spin_lock(&dev->lock);
+
+		if (i < 0) {
+			/* setup processing failed, force stall */
+			DEBUG_SETUP
+			    ("  --> ERROR: gadget setup FAILED (stalling), setup returned %d\n",
+			     i);
+			usb_set_index(0);
+			usb_set((EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL),
+				USB_EP0_CSR);
+
+			/* ep->stopped = 1; */
+			dev->ep0state = WAIT_FOR_SETUP;
+		}
+	}
+}
+
+/*
+ * DATA_STATE_NEED_ZLP
+ */
+static void lh7a40x_ep0_in_zlp(struct lh7a40x_udc *dev, u32 csr)
+{
+	DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
+
+	/* c.f. Table 15-14 */
+	usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
+	dev->ep0state = WAIT_FOR_SETUP;
+}
+
+/*
+ * handle ep0 interrupt
+ */
+static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr)
+{
+	struct lh7a40x_ep *ep = &dev->ep[0];
+	u32 csr;
+
+	/* Set index 0 */
+	usb_set_index(0);
+ 	csr = usb_read(USB_EP0_CSR);
+
+	DEBUG_EP0("%s: csr = %x\n", __FUNCTION__, csr);
+
+	/*
+	 * For overview of what we should be doing see c.f. Chapter 18.1.2.4
+	 * We will follow that outline here modified by our own global state
+	 * indication which provides hints as to what we think should be
+	 * happening..
+	 */
+
+	/*
+	 * if SENT_STALL is set
+	 *      - clear the SENT_STALL bit
+	 */
+	if (csr & EP0_SENT_STALL) {
+		DEBUG_EP0("%s: EP0_SENT_STALL is set: %x\n", __FUNCTION__, csr);
+		usb_clear((EP0_SENT_STALL | EP0_SEND_STALL), USB_EP0_CSR);
+		nuke(ep, -ECONNABORTED);
+		dev->ep0state = WAIT_FOR_SETUP;
+		return;
+	}
+
+	/*
+	 * if a transfer is in progress && IN_PKT_RDY and OUT_PKT_RDY are clear
+	 *      - fill EP0 FIFO
+	 *      - if last packet
+	 *      -       set IN_PKT_RDY | DATA_END
+	 *      - else
+	 *              set IN_PKT_RDY
+	 */
+	if (!(csr & (EP0_IN_PKT_RDY | EP0_OUT_PKT_RDY))) {
+		DEBUG_EP0("%s: IN_PKT_RDY and OUT_PKT_RDY are clear\n",
+			  __FUNCTION__);
+
+		switch (dev->ep0state) {
+		case DATA_STATE_XMIT:
+			DEBUG_EP0("continue with DATA_STATE_XMIT\n");
+			lh7a40x_ep0_in(dev, csr);
+			return;
+		case DATA_STATE_NEED_ZLP:
+			DEBUG_EP0("continue with DATA_STATE_NEED_ZLP\n");
+			lh7a40x_ep0_in_zlp(dev, csr);
+			return;
+		default:
+			/* Stall? */
+			DEBUG_EP0("Odd state!! state = %s\n",
+				  state_names[dev->ep0state]);
+			dev->ep0state = WAIT_FOR_SETUP;
+			/* nuke(ep, 0); */
+			/* usb_set(EP0_SEND_STALL, ep->csr1); */
+			break;
+		}
+	}
+
+	/*
+	 * if SETUP_END is set
+	 *      - abort the last transfer
+	 *      - set SERVICED_SETUP_END_BIT
+	 */
+	if (csr & EP0_SETUP_END) {
+		DEBUG_EP0("%s: EP0_SETUP_END is set: %x\n", __FUNCTION__, csr);
+
+		usb_set(EP0_CLR_SETUP_END, USB_EP0_CSR);
+
+		nuke(ep, 0);
+		dev->ep0state = WAIT_FOR_SETUP;
+	}
+
+	/*
+	 * if EP0_OUT_PKT_RDY is set
+	 *      - read data packet from EP0 FIFO
+	 *      - decode command
+	 *      - if error
+	 *              set SERVICED_OUT_PKT_RDY | DATA_END bits | SEND_STALL
+	 *      - else
+	 *              set SERVICED_OUT_PKT_RDY | DATA_END bits
+	 */
+	if (csr & EP0_OUT_PKT_RDY) {
+
+		DEBUG_EP0("%s: EP0_OUT_PKT_RDY is set: %x\n", __FUNCTION__,
+			  csr);
+
+		switch (dev->ep0state) {
+		case WAIT_FOR_SETUP:
+			DEBUG_EP0("WAIT_FOR_SETUP\n");
+			lh7a40x_ep0_setup(dev, csr);
+			break;
+
+		case DATA_STATE_RECV:
+			DEBUG_EP0("DATA_STATE_RECV\n");
+			lh7a40x_ep0_out(dev, csr);
+			break;
+
+		default:
+			/* send stall? */
+			DEBUG_EP0("strange state!! 2. send stall? state = %d\n",
+				  dev->ep0state);
+			break;
+		}
+	}
+}
+
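+/*
+ * Start the data stage for a request just queued on EP0, choosing the
+ * transmit (IN) or receive (OUT) path from the endpoint direction.
+ */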
+static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep)
+{
+	u32 csr;
+
+	usb_set_index(0);
+	csr = usb_read(USB_EP0_CSR);
+
+	DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
+
+	/* Clear "out packet ready" */
+	usb_set(EP0_CLR_OUT, USB_EP0_CSR);
+
+	if (ep_is_in(ep)) {
+		dev->ep0state = DATA_STATE_XMIT;
+		lh7a40x_ep0_in(dev, csr);
+	} else {
+		dev->ep0state = DATA_STATE_RECV;
+		lh7a40x_ep0_out(dev, csr);
+	}
+}
+
+/* ---------------------------------------------------------------------------
+ * 	device-scoped parts of the api to the usb controller hardware
+ * ---------------------------------------------------------------------------
+ */
+
+static int lh7a40x_udc_get_frame(struct usb_gadget *_gadget)
+{
+	u32 frame1 = usb_read(USB_FRM_NUM1);	/* Least significant 8 bits */
+	u32 frame2 = usb_read(USB_FRM_NUM2);	/* Most significant 3 bits */
+	DEBUG("%s, %p\n", __FUNCTION__, _gadget);
+	return ((frame2 & 0x07) << 8) | (frame1 & 0xff);
+}
+
+static int lh7a40x_udc_wakeup(struct usb_gadget *_gadget)
+{
+	/* host may not have enabled remote wakeup */
+	/*if ((UDCCS0 & UDCCS0_DRWF) == 0)
+	   return -EHOSTUNREACH;
+	   udc_set_mask_UDCCR(UDCCR_RSM); */
+	return -ENOTSUPP;
+}
+
+static const struct usb_gadget_ops lh7a40x_udc_ops = {
+	.get_frame = lh7a40x_udc_get_frame,
+	.wakeup = lh7a40x_udc_wakeup,
+	/* current versions must always be self-powered */
+};
+
+static void nop_release(struct device *dev)
+{
+	DEBUG("%s %s\n", __FUNCTION__, dev->bus_id);
+}
+
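+/*
+ * Static controller state: EP0 plus the three fixed data endpoints
+ * (ep1in-bulk, ep2out-bulk, ep3in-int) of the LH7A40x USB device block.
+ */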
+static struct lh7a40x_udc memory = {
+	.usb_address = 0,
+
+	.gadget = {
+		   .ops = &lh7a40x_udc_ops,
+		   .ep0 = &memory.ep[0].ep,
+		   .name = driver_name,
+		   .dev = {
+			   .bus_id = "gadget",
+			   .release = nop_release,
+			   },
+		   },
+
+	/* control endpoint */
+	.ep[0] = {
+		  .ep = {
+			 .name = ep0name,
+			 .ops = &lh7a40x_ep_ops,
+			 .maxpacket = EP0_PACKETSIZE,
+			 },
+		  .dev = &memory,
+
+		  .bEndpointAddress = 0,
+		  .bmAttributes = 0,
+
+		  .ep_type = ep_control,
+		  .fifo = io_p2v(USB_EP0_FIFO),
+		  .csr1 = USB_EP0_CSR,
+		  .csr2 = USB_EP0_CSR,
+		  },
+
+	/* first group of endpoints */
+	.ep[1] = {
+		  .ep = {
+			 .name = "ep1in-bulk",
+			 .ops = &lh7a40x_ep_ops,
+			 .maxpacket = 64,
+			 },
+		  .dev = &memory,
+
+		  .bEndpointAddress = USB_DIR_IN | 1,
+		  .bmAttributes = USB_ENDPOINT_XFER_BULK,
+
+		  .ep_type = ep_bulk_in,
+		  .fifo = io_p2v(USB_EP1_FIFO),
+		  .csr1 = USB_IN_CSR1,
+		  .csr2 = USB_IN_CSR2,
+		  },
+
+	.ep[2] = {
+		  .ep = {
+			 .name = "ep2out-bulk",
+			 .ops = &lh7a40x_ep_ops,
+			 .maxpacket = 64,
+			 },
+		  .dev = &memory,
+
+		  .bEndpointAddress = 2,
+		  .bmAttributes = USB_ENDPOINT_XFER_BULK,
+
+		  .ep_type = ep_bulk_out,
+		  .fifo = io_p2v(USB_EP2_FIFO),
+		  .csr1 = USB_OUT_CSR1,
+		  .csr2 = USB_OUT_CSR2,
+		  },
+
+	.ep[3] = {
+		  .ep = {
+			 .name = "ep3in-int",
+			 .ops = &lh7a40x_ep_ops,
+			 .maxpacket = 64,
+			 },
+		  .dev = &memory,
+
+		  .bEndpointAddress = USB_DIR_IN | 3,
+		  .bmAttributes = USB_ENDPOINT_XFER_INT,
+
+		  .ep_type = ep_interrupt,
+		  .fifo = io_p2v(USB_EP3_FIFO),
+		  .csr1 = USB_IN_CSR1,
+		  .csr2 = USB_IN_CSR2,
+		  },
+};
+
+/*
+ * 	probe - binds to the platform device
+ */
+static int lh7a40x_udc_probe(struct device *_dev)
+{
+	struct lh7a40x_udc *dev = &memory;
+	int retval;
+
+	DEBUG("%s: %p\n", __FUNCTION__, _dev);
+
+	spin_lock_init(&dev->lock);
+	dev->dev = _dev;
+
+	device_initialize(&dev->gadget.dev);
+	dev->gadget.dev.parent = _dev;
+
+	the_controller = dev;
+	dev_set_drvdata(_dev, dev);
+
+	udc_disable(dev);
+	udc_reinit(dev);
+
+	/* irq setup after old hardware state is cleaned up */
+	retval =
+	    request_irq(IRQ_USBINTR, lh7a40x_udc_irq, SA_INTERRUPT, driver_name,
+			dev);
+	if (retval != 0) {
+		DEBUG(KERN_ERR "%s: can't get irq %i, err %d\n", driver_name,
+		      IRQ_USBINTR, retval);
+		return -EBUSY;
+	}
+
+	create_proc_files();
+
+	return retval;
+}
+
+static int lh7a40x_udc_remove(struct device *_dev)
+{
+	struct lh7a40x_udc *dev = _dev->driver_data;
+
+	DEBUG("%s: %p\n", __FUNCTION__, dev);
+
+	udc_disable(dev);
+	remove_proc_files();
+	usb_gadget_unregister_driver(dev->driver);
+
+	free_irq(IRQ_USBINTR, dev);
+
+	dev_set_drvdata(_dev, 0);
+
+	the_controller = 0;
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct device_driver udc_driver = {
+	.name = (char *)driver_name,
+	.bus = &platform_bus_type,
+	.probe = lh7a40x_udc_probe,
+	.remove = lh7a40x_udc_remove
+	    /* FIXME power management support */
+	    /* .suspend = ... disable UDC */
+	    /* .resume = ... re-enable UDC */
+};
+
+static int __init udc_init(void)
+{
+	DEBUG("%s: %s version %s\n", __FUNCTION__, driver_name, DRIVER_VERSION);
+	return driver_register(&udc_driver);
+}
+
+static void __exit udc_exit(void)
+{
+	driver_unregister(&udc_driver);
+}
+
+module_init(udc_init);
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Mikko Lahteenmaki, Bo Henriksen");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
new file mode 100644
index 0000000..1bb455c
--- /dev/null
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -0,0 +1,261 @@
+/*
+ * linux/drivers/usb/gadget/lh7a40x_udc.h
+ * Sharp LH7A40x on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID
+ * Copyright (C) 2004 Bo Henriksen, Nordic ID
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __LH7A40X_H_
+#define __LH7A40X_H_
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <asm/hardware.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+/*
+ * Memory map
+ */
+
+#define USB_FA					0x80000200	// function address register
+#define USB_PM					0x80000204	// power management register
+
+#define USB_IN_INT				0x80000208	// IN interrupt register bank (EP0-EP3)
+#define USB_OUT_INT				0x80000210	// OUT interrupt register bank (EP2)
+#define USB_INT					0x80000218	// interrupt register bank
+
+#define USB_IN_INT_EN			0x8000021C	// IN interrupt enable register bank
+#define USB_OUT_INT_EN			0x80000224	// OUT interrupt enable register bank
+#define USB_INT_EN				0x8000022C	// USB interrupt enable register bank
+
+#define USB_FRM_NUM1			0x80000230	// Frame number1 register
+#define USB_FRM_NUM2			0x80000234	// Frame number2 register
+#define USB_INDEX				0x80000238	// index register
+
+#define USB_IN_MAXP				0x80000240	// IN MAXP register
+#define USB_IN_CSR1				0x80000244	// IN CSR1 register/EP0 CSR register
+#define USB_EP0_CSR				0x80000244	// IN CSR1 register/EP0 CSR register
+#define USB_IN_CSR2				0x80000248	// IN CSR2 register
+#define USB_OUT_MAXP			0x8000024C	// OUT MAXP register
+
+#define USB_OUT_CSR1			0x80000250	// OUT CSR1 register
+#define USB_OUT_CSR2			0x80000254	// OUT CSR2 register
+#define USB_OUT_FIFO_WC1		0x80000258	// OUT FIFO write count1 register
+#define USB_OUT_FIFO_WC2		0x8000025C	// OUT FIFO write count2 register
+
+#define USB_RESET				0x8000044C	// USB reset register
+
+#define	USB_EP0_FIFO			0x80000280
+#define	USB_EP1_FIFO			0x80000284
+#define	USB_EP2_FIFO			0x80000288
+#define	USB_EP3_FIFO			0x8000028c
+
+/*
+ * USB reset register
+ */
+#define USB_RESET_APB			(1<<1)	//resets USB APB control side WRITE
+#define USB_RESET_IO			(1<<0)	//resets USB IO side WRITE
+
+/*
+ * USB function address register
+ */
+#define USB_FA_ADDR_UPDATE		(1<<7)
+#define USB_FA_FUNCTION_ADDR	(0x7F)
+
+/*
+ * Power Management register
+ */
+#define PM_USB_DCP				(1<<5)
+#define PM_USB_ENABLE			(1<<4)
+#define PM_USB_RESET			(1<<3)
+#define PM_UC_RESUME			(1<<2)
+#define PM_SUSPEND_MODE			(1<<1)
+#define PM_ENABLE_SUSPEND		(1<<0)
+
+/*
+ * IN interrupt register
+ */
+#define USB_IN_INT_EP3				(1<<3)
+#define USB_IN_INT_EP1				(1<<1)
+#define USB_IN_INT_EP0				(1<<0)
+
+/*
+ * OUT interrupt register
+ */
+#define USB_OUT_INT_EP2				(1<<2)
+
+/*
+ * USB interrupt register
+ */
+#define USB_INT_RESET_INT			(1<<2)
+#define USB_INT_RESUME_INT			(1<<1)
+#define USB_INT_SUSPEND_INT			(1<<0)
+
+/*
+ * USB interrupt enable register
+ */
+#define USB_INT_EN_USB_RESET_INTER		(1<<2)
+#define USB_INT_EN_RESUME_INTER			(1<<1)
+#define USB_INT_EN_SUSPEND_INTER		(1<<0)
+
+/*
+ * INCSR1 register
+ */
+#define USB_IN_CSR1_CLR_DATA_TOGGLE		(1<<6)
+#define USB_IN_CSR1_SENT_STALL			(1<<5)
+#define USB_IN_CSR1_SEND_STALL			(1<<4)
+#define USB_IN_CSR1_FIFO_FLUSH			(1<<3)
+#define USB_IN_CSR1_FIFO_NOT_EMPTY		(1<<1)
+#define USB_IN_CSR1_IN_PKT_RDY			(1<<0)
+
+/*
+ * INCSR2 register
+ */
+#define USB_IN_CSR2_AUTO_SET			(1<<7)
+#define USB_IN_CSR2_USB_DMA_EN			(1<<4)
+
+/*
+ * OUT CSR1 register
+ */
+#define USB_OUT_CSR1_CLR_DATA_REG		(1<<7)
+#define USB_OUT_CSR1_SENT_STALL			(1<<6)
+#define USB_OUT_CSR1_SEND_STALL			(1<<5)
+#define USB_OUT_CSR1_FIFO_FLUSH			(1<<4)
+#define USB_OUT_CSR1_FIFO_FULL			(1<<1)
+#define USB_OUT_CSR1_OUT_PKT_RDY		(1<<0)
+
+/*
+ * OUT CSR2 register
+ */
+#define USB_OUT_CSR2_AUTO_CLR			(1<<7)
+#define USB_OUT_CSR2_USB_DMA_EN			(1<<4)
+
+/*
+ * EP0 CSR
+ */
+#define EP0_CLR_SETUP_END		(1<<7)	/* Clear "Setup Ends" Bit (w) */
+#define EP0_CLR_OUT				(1<<6)	/* Clear "Out packet ready" Bit (w) */
+#define EP0_SEND_STALL			(1<<5)	/* Send STALL Handshake (rw) */
+#define EP0_SETUP_END			(1<<4)	/* Setup Ends (r) */
+
+#define EP0_DATA_END			(1<<3)	/* Data end (rw) */
+#define EP0_SENT_STALL			(1<<2)	/* Sent Stall Handshake (r) */
+#define EP0_IN_PKT_RDY			(1<<1)	/* In packet ready (rw) */
+#define EP0_OUT_PKT_RDY			(1<<0)	/* Out packet ready (r) */
+
+/* general CSR */
+#define OUT_PKT_RDY		(1<<0)
+#define IN_PKT_RDY		(1<<0)
+
+/*
+ * IN/OUT MAXP register
+ */
+#define USB_OUT_MAXP_MAXP			(0xF)
+#define USB_IN_MAXP_MAXP			(0xF)
+
+// Max packet size
+//#define EP0_PACKETSIZE        0x10
+#define EP0_PACKETSIZE  	0x8
+#define EP0_MAXPACKETSIZE  	0x10
+
+#define UDC_MAX_ENDPOINTS       4
+
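+/* ep0state values for the control endpoint state machine */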
+#define WAIT_FOR_SETUP          0
+#define DATA_STATE_XMIT         1
+#define DATA_STATE_NEED_ZLP     2
+#define WAIT_FOR_OUT_STATUS     3
+#define DATA_STATE_RECV         4
+
+/* ********************************************************************************************* */
+/* IO
+ */
+
+typedef enum ep_type {
+	ep_control, ep_bulk_in, ep_bulk_out, ep_interrupt
+} ep_type_t;
+
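+/* Per-endpoint state: pending request queue plus the FIFO and CSR
+ * register addresses used after this endpoint is selected via INDEX.
+ */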
+struct lh7a40x_ep {
+	struct usb_ep ep;
+	struct lh7a40x_udc *dev;
+
+	const struct usb_endpoint_descriptor *desc;
+	struct list_head queue;
+	unsigned long pio_irqs;
+
+	u8 stopped;
+	u8 bEndpointAddress;
+	u8 bmAttributes;
+
+	ep_type_t ep_type;
+	u32 fifo;
+	u32 csr1;
+	u32 csr2;
+};
+
+struct lh7a40x_request {
+	struct usb_request req;
+	struct list_head queue;
+};
+
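+/* Driver state for the single on-chip device controller instance */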
+struct lh7a40x_udc {
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *driver;
+	struct device *dev;
+	spinlock_t lock;
+
+	int ep0state;
+	struct lh7a40x_ep ep[UDC_MAX_ENDPOINTS];
+
+	unsigned char usb_address;
+
+	unsigned req_pending:1, req_std:1, req_config:1;
+};
+
+extern struct lh7a40x_udc *the_controller;
+
+#define ep_is_in(EP) 		(((EP)->bEndpointAddress&USB_DIR_IN)==USB_DIR_IN)
+#define ep_index(EP) 		((EP)->bEndpointAddress&0xF)
+#define ep_maxpacket(EP) 	((EP)->ep.maxpacket)
+
+#endif
diff --git a/drivers/usb/gadget/ndis.h b/drivers/usb/gadget/ndis.h
new file mode 100644
index 0000000..c553bbf
--- /dev/null
+++ b/drivers/usb/gadget/ndis.h
@@ -0,0 +1,217 @@
+/*
+ * ndis.h
+ *
+ * ntddndis.h modified by Benedikt Spranger <b.spranger@pengutronix.de>
+ *
+ * Thanks to the cygwin development team,
+ * especially to Casper S. Hornstrup <chorns@users.sourceforge.net>
+ * 
+ * THIS SOFTWARE IS NOT COPYRIGHTED
+ *
+ * This source code is offered for use in the public domain. You may
+ * use, modify or distribute it freely.
+ *
+ * This code is distributed in the hope that it will be useful but
+ * WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY
+ * DISCLAIMED. This includes but is not limited to warranties of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#ifndef _LINUX_NDIS_H
+#define _LINUX_NDIS_H
+
+
+#define NDIS_STATUS_MULTICAST_FULL	  0xC0010009
+#define NDIS_STATUS_MULTICAST_EXISTS      0xC001000A
+#define NDIS_STATUS_MULTICAST_NOT_FOUND   0xC001000B
+
+enum NDIS_DEVICE_POWER_STATE {
+	NdisDeviceStateUnspecified = 0,
+	NdisDeviceStateD0,
+	NdisDeviceStateD1,
+	NdisDeviceStateD2,
+	NdisDeviceStateD3,
+	NdisDeviceStateMaximum
+};
+
+struct NDIS_PM_WAKE_UP_CAPABILITIES {
+	enum NDIS_DEVICE_POWER_STATE  MinMagicPacketWakeUp;
+	enum NDIS_DEVICE_POWER_STATE  MinPatternWakeUp;
+	enum NDIS_DEVICE_POWER_STATE  MinLinkChangeWakeUp;
+};
+
+/* NDIS_PNP_CAPABILITIES.Flags constants */
+#define NDIS_DEVICE_WAKE_UP_ENABLE                0x00000001
+#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE  0x00000002
+#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE   0x00000004
+
+struct NDIS_PNP_CAPABILITIES {
+	u32					Flags;
+	struct NDIS_PM_WAKE_UP_CAPABILITIES	WakeUpCapabilities;
+};
+
+struct NDIS_PM_PACKET_PATTERN {
+	u32	Priority;
+	u32	Reserved;
+	u32	MaskSize;
+	u32	PatternOffset;
+	u32	PatternSize;
+	u32	PatternFlags;
+};
+
+
+/* Required Object IDs (OIDs) */
+#define OID_GEN_SUPPORTED_LIST            0x00010101
+#define OID_GEN_HARDWARE_STATUS           0x00010102
+#define OID_GEN_MEDIA_SUPPORTED           0x00010103
+#define OID_GEN_MEDIA_IN_USE              0x00010104
+#define OID_GEN_MAXIMUM_LOOKAHEAD         0x00010105
+#define OID_GEN_MAXIMUM_FRAME_SIZE        0x00010106
+#define OID_GEN_LINK_SPEED                0x00010107
+#define OID_GEN_TRANSMIT_BUFFER_SPACE     0x00010108
+#define OID_GEN_RECEIVE_BUFFER_SPACE      0x00010109
+#define OID_GEN_TRANSMIT_BLOCK_SIZE       0x0001010A
+#define OID_GEN_RECEIVE_BLOCK_SIZE        0x0001010B
+#define OID_GEN_VENDOR_ID                 0x0001010C
+#define OID_GEN_VENDOR_DESCRIPTION        0x0001010D
+#define OID_GEN_CURRENT_PACKET_FILTER     0x0001010E
+#define OID_GEN_CURRENT_LOOKAHEAD         0x0001010F
+#define OID_GEN_DRIVER_VERSION            0x00010110
+#define OID_GEN_MAXIMUM_TOTAL_SIZE        0x00010111
+#define OID_GEN_PROTOCOL_OPTIONS          0x00010112
+#define OID_GEN_MAC_OPTIONS               0x00010113
+#define OID_GEN_MEDIA_CONNECT_STATUS      0x00010114
+#define OID_GEN_MAXIMUM_SEND_PACKETS      0x00010115
+#define OID_GEN_VENDOR_DRIVER_VERSION     0x00010116
+#define OID_GEN_SUPPORTED_GUIDS           0x00010117
+#define OID_GEN_NETWORK_LAYER_ADDRESSES   0x00010118
+#define OID_GEN_TRANSPORT_HEADER_OFFSET   0x00010119
+#define OID_GEN_MACHINE_NAME              0x0001021A
+#define OID_GEN_RNDIS_CONFIG_PARAMETER    0x0001021B
+#define OID_GEN_VLAN_ID                   0x0001021C
+
+/* Optional OIDs */
+#define OID_GEN_MEDIA_CAPABILITIES        0x00010201
+#define OID_GEN_PHYSICAL_MEDIUM           0x00010202
+
+/* Required statistics OIDs */
+#define OID_GEN_XMIT_OK                   0x00020101
+#define OID_GEN_RCV_OK                    0x00020102
+#define OID_GEN_XMIT_ERROR                0x00020103
+#define OID_GEN_RCV_ERROR                 0x00020104
+#define OID_GEN_RCV_NO_BUFFER             0x00020105
+
+/* Optional statistics OIDs */
+#define OID_GEN_DIRECTED_BYTES_XMIT       0x00020201
+#define OID_GEN_DIRECTED_FRAMES_XMIT      0x00020202
+#define OID_GEN_MULTICAST_BYTES_XMIT      0x00020203
+#define OID_GEN_MULTICAST_FRAMES_XMIT     0x00020204
+#define OID_GEN_BROADCAST_BYTES_XMIT      0x00020205
+#define OID_GEN_BROADCAST_FRAMES_XMIT     0x00020206
+#define OID_GEN_DIRECTED_BYTES_RCV        0x00020207
+#define OID_GEN_DIRECTED_FRAMES_RCV       0x00020208
+#define OID_GEN_MULTICAST_BYTES_RCV       0x00020209
+#define OID_GEN_MULTICAST_FRAMES_RCV      0x0002020A
+#define OID_GEN_BROADCAST_BYTES_RCV       0x0002020B
+#define OID_GEN_BROADCAST_FRAMES_RCV      0x0002020C
+#define OID_GEN_RCV_CRC_ERROR             0x0002020D
+#define OID_GEN_TRANSMIT_QUEUE_LENGTH     0x0002020E
+#define OID_GEN_GET_TIME_CAPS             0x0002020F
+#define OID_GEN_GET_NETCARD_TIME          0x00020210
+#define OID_GEN_NETCARD_LOAD              0x00020211
+#define OID_GEN_DEVICE_PROFILE            0x00020212
+#define OID_GEN_INIT_TIME_MS              0x00020213
+#define OID_GEN_RESET_COUNTS              0x00020214
+#define OID_GEN_MEDIA_SENSE_COUNTS        0x00020215
+#define OID_GEN_FRIENDLY_NAME             0x00020216
+#define OID_GEN_MINIPORT_INFO             0x00020217
+#define OID_GEN_RESET_VERIFY_PARAMETERS   0x00020218
+
+/* IEEE 802.3 (Ethernet) OIDs */
+#define NDIS_802_3_MAC_OPTION_PRIORITY    0x00000001
+
+#define OID_802_3_PERMANENT_ADDRESS       0x01010101
+#define OID_802_3_CURRENT_ADDRESS         0x01010102
+#define OID_802_3_MULTICAST_LIST          0x01010103
+#define OID_802_3_MAXIMUM_LIST_SIZE       0x01010104
+#define OID_802_3_MAC_OPTIONS             0x01010105
+#define OID_802_3_RCV_ERROR_ALIGNMENT     0x01020101
+#define OID_802_3_XMIT_ONE_COLLISION      0x01020102
+#define OID_802_3_XMIT_MORE_COLLISIONS    0x01020103
+#define OID_802_3_XMIT_DEFERRED           0x01020201
+#define OID_802_3_XMIT_MAX_COLLISIONS     0x01020202
+#define OID_802_3_RCV_OVERRUN             0x01020203
+#define OID_802_3_XMIT_UNDERRUN           0x01020204
+#define OID_802_3_XMIT_HEARTBEAT_FAILURE  0x01020205
+#define OID_802_3_XMIT_TIMES_CRS_LOST     0x01020206
+#define OID_802_3_XMIT_LATE_COLLISIONS    0x01020207
+
+/* OID_GEN_MINIPORT_INFO constants */
+#define NDIS_MINIPORT_BUS_MASTER                      0x00000001
+#define NDIS_MINIPORT_WDM_DRIVER                      0x00000002
+#define NDIS_MINIPORT_SG_LIST                         0x00000004
+#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY            0x00000008
+#define NDIS_MINIPORT_INDICATES_PACKETS               0x00000010
+#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE             0x00000020
+#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE            0x00000040
+#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS        0x00000080
+#define NDIS_MINIPORT_INTERMEDIATE_DRIVER             0x00000100
+#define NDIS_MINIPORT_IS_NDIS_5                       0x00000200
+#define NDIS_MINIPORT_IS_CO                           0x00000400
+#define NDIS_MINIPORT_DESERIALIZE                     0x00000800
+#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING          0x00001000
+#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE            0x00002000
+#define NDIS_MINIPORT_NETBOOT_CARD                    0x00004000
+#define NDIS_MINIPORT_PM_SUPPORTED                    0x00008000
+#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE  0x00010000
+#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS           0x00020000
+#define NDIS_MINIPORT_HIDDEN                          0x00040000
+#define NDIS_MINIPORT_SWENUM                          0x00080000
+#define NDIS_MINIPORT_SURPRISE_REMOVE_OK              0x00100000
+#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND              0x00200000
+#define NDIS_MINIPORT_HARDWARE_DEVICE                 0x00400000
+#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS    0x00800000
+#define NDIS_MINIPORT_64BITS_DMA                      0x01000000
+
+#define NDIS_MEDIUM_802_3		0x00000000
+#define NDIS_MEDIUM_802_5		0x00000001
+#define NDIS_MEDIUM_FDDI		0x00000002
+#define NDIS_MEDIUM_WAN			0x00000003
+#define NDIS_MEDIUM_LOCAL_TALK		0x00000004
+#define NDIS_MEDIUM_DIX			0x00000005
+#define NDIS_MEDIUM_ARCENT_RAW		0x00000006
+#define NDIS_MEDIUM_ARCENT_878_2	0x00000007
+#define NDIS_MEDIUM_ATM			0x00000008
+#define NDIS_MEDIUM_WIRELESS_LAN	0x00000009
+#define NDIS_MEDIUM_IRDA		0x0000000A
+#define NDIS_MEDIUM_BPC			0x0000000B
+#define NDIS_MEDIUM_CO_WAN		0x0000000C
+#define NDIS_MEDIUM_1394		0x0000000D
+
+#define NDIS_PACKET_TYPE_DIRECTED	0x00000001
+#define NDIS_PACKET_TYPE_MULTICAST	0x00000002
+#define NDIS_PACKET_TYPE_ALL_MULTICAST	0x00000004
+#define NDIS_PACKET_TYPE_BROADCAST	0x00000008
+#define NDIS_PACKET_TYPE_SOURCE_ROUTING	0x00000010
+#define NDIS_PACKET_TYPE_PROMISCUOUS	0x00000020
+#define NDIS_PACKET_TYPE_SMT		0x00000040
+#define NDIS_PACKET_TYPE_ALL_LOCAL	0x00000080
+#define NDIS_PACKET_TYPE_GROUP		0x00000100
+#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL	0x00000200
+#define NDIS_PACKET_TYPE_FUNCTIONAL	0x00000400
+#define NDIS_PACKET_TYPE_MAC_FRAME	0x00000800
+
+#define NDIS_MEDIA_STATE_CONNECTED	0x00000000
+#define NDIS_MEDIA_STATE_DISCONNECTED	0x00000001
+
+#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA     0x00000001
+#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED      0x00000002
+#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND      0x00000004
+#define NDIS_MAC_OPTION_NO_LOOPBACK             0x00000008
+#define NDIS_MAC_OPTION_FULL_DUPLEX             0x00000010
+#define NDIS_MAC_OPTION_EOTX_INDICATION         0x00000020
+#define NDIS_MAC_OPTION_8021P_PRIORITY          0x00000040
+#define NDIS_MAC_OPTION_RESERVED                0x80000000
+
+#endif /* _LINUX_NDIS_H */
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
new file mode 100644
index 0000000..e5457f2
--- /dev/null
+++ b/drivers/usb/gadget/net2280.c
@@ -0,0 +1,2967 @@
+/*
+ * Driver for the PLX NET2280 USB device controller.
+ * Specs and errata are available from <http://www.plxtech.com>.
+ *
+ * PLX Technology Inc. (formerly NetChip Technology) supported the 
+ * development of this driver.
+ *
+ *
+ * CODE STATUS HIGHLIGHTS
+ *
+ * This driver should work well with most "gadget" drivers, including
+ * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
+ * as well as Gadget Zero and Gadgetfs.
+ *
+ * DMA is enabled by default.  Drivers using transfer queues might use
+ * DMA chaining to remove IRQ latencies between transfers.  (Except when
+ * short OUT transfers happen.)  Drivers can use the req->no_interrupt
+ * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
+ * and DMA chaining is enabled.
+ *
+ * Note that almost all the errata workarounds here are only needed for
+ * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
+ */
+
+/*
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 PLX Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#undef	DEBUG		/* messages on error and most fault paths */
+#undef	VERBOSE		/* extra debug messages (success too) */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+
+#define	DRIVER_DESC		"PLX NET2280 USB Peripheral Controller"
+#define	DRIVER_VERSION		"2005 Feb 03"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+#define	EP_DONTUSE		13	/* nonzero */
+
+#define USE_RDK_LEDS		/* GPIO pins control three LEDs */
+
+
+static const char driver_name [] = "net2280";
+static const char driver_desc [] = DRIVER_DESC;
+
+static const char ep0name [] = "ep0";
+static const char *ep_name [] = {
+	ep0name,
+	"ep-a", "ep-b", "ep-c", "ep-d",
+	"ep-e", "ep-f",
+};
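+
+/* gadget drivers see these names on the endpoints in gadget->ep_list;
+ * a function driver picks endpoints by matching ep->name (for instance
+ * "ep-a") or by using the generic endpoint autoconfig helpers.
+ */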
+
+/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
+ * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
+ *
+ * The net2280 DMA engines are not tightly integrated with their FIFOs;
+ * not all cases are (yet) handled well in this driver or the silicon.
+ * Some gadget drivers work better with the dma support here than others.
+ * These two parameters let you use PIO or more aggressive DMA.
+ */
+static int use_dma = 1;
+static int use_dma_chaining = 0;
+
+/* "modprobe net2280 use_dma=n" etc */
+module_param (use_dma, bool, S_IRUGO);
+module_param (use_dma_chaining, bool, S_IRUGO);
+
+
+/* mode 0 == ep-{a,b,c,d} 1K fifo each
+ * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
+ * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
+ */
+static ushort fifo_mode = 0;
+
+/* "modprobe net2280 fifo_mode=1" etc */
+module_param (fifo_mode, ushort, 0644);
+
+/* enable_suspend -- When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2280.  Otherwise,
+ * USB suspend requests will be ignored.  This is acceptable for
+ * self-powered devices, and helps avoid some quirks.
+ */
+static int enable_suspend = 0;
+
+/* "modprobe net2280 enable_suspend=1" etc */
+module_param (enable_suspend, bool, S_IRUGO);
+
+
+#define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
+
+#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
+static char *type_string (u8 bmAttributes)
+{
+	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:	return "bulk";
+	case USB_ENDPOINT_XFER_ISOC:	return "iso";
+	case USB_ENDPOINT_XFER_INT:	return "intr";
+	}
+	return "control";
+}
+#endif
+
+#include "net2280.h"
+
+#define valid_bit	__constant_cpu_to_le32 (1 << VALID_BIT)
+#define dma_done_ie	__constant_cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
+
+/*-------------------------------------------------------------------------*/
+
+static int
+net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct net2280		*dev;
+	struct net2280_ep	*ep;
+	u32			max, tmp;
+	unsigned long		flags;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	/* erratum 0119 workaround ties up an endpoint number */
+	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
+		return -EDOM;
+
+	/* sanity check ep-e/ep-f since their fifos are small */
+	max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
+	if (ep->num > 4 && max > 64)
+		return -ERANGE;
+
+	spin_lock_irqsave (&dev->lock, flags);
+	_ep->maxpacket = max & 0x7ff;
+	ep->desc = desc;
+
+	/* ep_reset() has already been called */
+	ep->stopped = 0;
+	ep->out_overflow = 0;
+
+	/* set speed-dependent max packet; may kick in high bandwidth */
+	set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);
+
+	/* FIFO lines can't go to different packets.  PIO is ok, so
+	 * use it instead of troublesome (non-bulk) multi-packet DMA.
+	 */
+	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
+		DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
+			ep->ep.name, ep->ep.maxpacket);
+		ep->dma = NULL;
+	}
+
+	/* set type, direction, address; reset fifo counters */
+	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
+	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+	if (tmp == USB_ENDPOINT_XFER_INT) {
+		/* erratum 0105 workaround prevents hs NYET */
+		if (dev->chiprev == 0100
+				&& dev->gadget.speed == USB_SPEED_HIGH
+				&& !(desc->bEndpointAddress & USB_DIR_IN))
+			writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
+				&ep->regs->ep_rsp);
+	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
+		/* catch some particularly blatant driver bugs */
+		if ((dev->gadget.speed == USB_SPEED_HIGH
+					&& max != 512)
+				|| (dev->gadget.speed == USB_SPEED_FULL
+					&& max > 64)) {
+			spin_unlock_irqrestore (&dev->lock, flags);
+			return -ERANGE;
+		}
+	}
+	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
+	tmp <<= ENDPOINT_TYPE;
+	tmp |= desc->bEndpointAddress;
+	tmp |= (4 << ENDPOINT_BYTE_COUNT);	/* default full fifo lines */
+	tmp |= 1 << ENDPOINT_ENABLE;
+	wmb ();
+
+	/* for OUT transfers, block the rx fifo until a read is posted */
+	ep->is_in = (tmp & USB_DIR_IN) != 0;
+	if (!ep->is_in)
+		writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+
+	writel (tmp, &ep->regs->ep_cfg);
+
+	/* enable irqs */
+	if (!ep->dma) {				/* pio, per-packet */
+		tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
+		writel (tmp, &dev->regs->pciirqenb0);
+
+		tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+			| readl (&ep->regs->ep_irqenb);
+		writel (tmp, &ep->regs->ep_irqenb);
+	} else {				/* dma, per-request */
+		tmp = (1 << (8 + ep->num));	/* completion */
+		tmp |= readl (&dev->regs->pciirqenb1);
+		writel (tmp, &dev->regs->pciirqenb1);
+
+		/* for short OUT transfers, dma completions can't
+		 * advance the queue; do it pio-style, by hand.
+		 * NOTE erratum 0112 workaround #2
+		 */
+		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
+			tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
+			writel (tmp, &ep->regs->ep_irqenb);
+
+			tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
+			writel (tmp, &dev->regs->pciirqenb0);
+		}
+	}
+
+	tmp = desc->bEndpointAddress;
+	DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
+		_ep->name, tmp & 0x0f, DIR_STRING (tmp),
+		type_string (desc->bmAttributes),
+		ep->dma ? "dma" : "pio", max);
+
+	/* pci writes may still be posted */
+	spin_unlock_irqrestore (&dev->lock, flags);
+	return 0;
+}
+
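+/* busy-wait until the masked bits of a register equal "done", giving up
+ * after "usec" microseconds; reads of all-ones mean the device is gone.
+ */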
+static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
+{
+	u32	result;
+
+	do {
+		result = readl (ptr);
+		if (result == ~(u32)0)		/* "device unplugged" */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay (1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
+
+static struct usb_ep_ops net2280_ep_ops;
+
+static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
+{
+	u32		tmp;
+
+	ep->desc = NULL;
+	INIT_LIST_HEAD (&ep->queue);
+
+	ep->ep.maxpacket = ~0;
+	ep->ep.ops = &net2280_ep_ops;
+
+	/* disable the dma, irqs, endpoint... */
+	if (ep->dma) {
+		writel (0, &ep->dma->dmactl);
+		writel (  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
+			| (1 << DMA_TRANSACTION_DONE_INTERRUPT)
+			| (1 << DMA_ABORT)
+			, &ep->dma->dmastat);
+
+		tmp = readl (&regs->pciirqenb0);
+		tmp &= ~(1 << ep->num);
+		writel (tmp, &regs->pciirqenb0);
+	} else {
+		tmp = readl (&regs->pciirqenb1);
+		tmp &= ~(1 << (8 + ep->num));	/* completion */
+		writel (tmp, &regs->pciirqenb1);
+	}
+	writel (0, &ep->regs->ep_irqenb);
+
+	/* init to our chosen defaults, notably so that we NAK OUT
+	 * packets until the driver queues a read (+note erratum 0112)
+	 */
+	tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
+		| (1 << SET_NAK_OUT_PACKETS)
+		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
+		| (1 << CLEAR_INTERRUPT_MODE);
+
+	if (ep->num != 0) {
+		tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
+			| (1 << CLEAR_ENDPOINT_HALT);
+	}
+	writel (tmp, &ep->regs->ep_rsp);
+
+	/* scrub most status bits, and flush any fifo state */
+	writel (  (1 << TIMEOUT)
+		| (1 << USB_STALL_SENT)
+		| (1 << USB_IN_NAK_SENT)
+		| (1 << USB_IN_ACK_RCVD)
+		| (1 << USB_OUT_PING_NAK_SENT)
+		| (1 << USB_OUT_ACK_SENT)
+		| (1 << FIFO_OVERFLOW)
+		| (1 << FIFO_UNDERFLOW)
+		| (1 << FIFO_FLUSH)
+		| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
+		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
+		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+		| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+		| (1 << DATA_IN_TOKEN_INTERRUPT)
+		, &ep->regs->ep_stat);
+
+	/* fifo size is handled separately */
+}
+
+static void nuke (struct net2280_ep *);
+
+static int net2280_disable (struct usb_ep *_ep)
+{
+	struct net2280_ep	*ep;
+	unsigned long		flags;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || !ep->desc || _ep->name == ep0name)
+		return -EINVAL;
+
+	spin_lock_irqsave (&ep->dev->lock, flags);
+	nuke (ep);
+	ep_reset (ep->dev->regs, ep);
+
+	VDEBUG (ep->dev, "disabled %s %s\n",
+			ep->dma ? "dma" : "pio", _ep->name);
+
+	/* synch memory views with the device */
+	(void) readl (&ep->regs->ep_cfg);
+
+	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
+		ep->dma = &ep->dev->dma [ep->num - 1];
+
+	spin_unlock_irqrestore (&ep->dev->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+net2280_alloc_request (struct usb_ep *_ep, int gfp_flags)
+{
+	struct net2280_ep	*ep;
+	struct net2280_request	*req;
+
+	if (!_ep)
+		return NULL;
+	ep = container_of (_ep, struct net2280_ep, ep);
+
+	req = kmalloc (sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	memset (req, 0, sizeof *req);
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD (&req->queue);
+
+	/* this dma descriptor may be swapped with the previous dummy */
+	if (ep->dma) {
+		struct net2280_dma	*td;
+
+		td = pci_pool_alloc (ep->dev->requests, gfp_flags,
+				&req->td_dma);
+		if (!td) {
+			kfree (req);
+			return NULL;
+		}
+		td->dmacount = 0;	/* not VALID */
+		td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
+		td->dmadesc = td->dmaaddr;
+		req->td = td;
+	}
+	return &req->req;
+}
+
+static void
+net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct net2280_ep	*ep;
+	struct net2280_request	*req;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || !_req)
+		return;
+
+	req = container_of (_req, struct net2280_request, req);
+	WARN_ON (!list_empty (&req->queue));
+	if (req->td)
+		pci_pool_free (ep->dev->requests, req->td, req->td_dma);
+	kfree (req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#undef USE_KMALLOC
+
+/* many common platforms have dma-coherent caches, which means that it's
+ * safe to use kmalloc() memory for all i/o buffers without using any
+ * cache flushing calls.  (unless you're trying to share cache lines
+ * between dma and non-dma activities, which is a slow idea in any case.)
+ *
+ * other platforms need more care, with 2.5 having a moderately general
+ * solution (which falls down for allocations smaller than one page)
+ * that improves significantly on the 2.4 PCI allocators by removing
+ * the restriction that memory never be freed in_interrupt().
+ */
+#if	defined(CONFIG_X86)
+#define USE_KMALLOC
+
+#elif	defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+#define USE_KMALLOC
+
+#elif	defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
+#define USE_KMALLOC
+
+/* FIXME there are other cases, including an x86-64 one ...  */
+#endif
+
+/* allocating buffers this way eliminates dma mapping overhead, which
+ * on some platforms will mean eliminating a per-io buffer copy.  with
+ * some kinds of system caches, further tweaks may still be needed.
+ */
+static void *
+net2280_alloc_buffer (
+	struct usb_ep		*_ep,
+	unsigned		bytes,
+	dma_addr_t		*dma,
+	int			gfp_flags
+)
+{
+	void			*retval;
+	struct net2280_ep	*ep;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep)
+		return NULL;
+	*dma = DMA_ADDR_INVALID;
+
+#if	defined(USE_KMALLOC)
+	retval = kmalloc(bytes, gfp_flags);
+	if (retval)
+		*dma = virt_to_phys(retval);
+#else
+	if (ep->dma) {
+		/* the main problem with this call is that it wastes memory
+		 * on typical 1/N page allocations: it allocates 1-N pages.
+		 */
+#warning Using dma_alloc_coherent even with buffers smaller than a page.
+		retval = dma_alloc_coherent(&ep->dev->pdev->dev,
+				bytes, dma, gfp_flags);
+	} else
+		retval = kmalloc(bytes, gfp_flags);
+#endif
+	return retval;
+}
+
+static void
+net2280_free_buffer (
+	struct usb_ep *_ep,
+	void *buf,
+	dma_addr_t dma,
+	unsigned bytes
+) {
+	/* free memory into the right allocator */
+#ifndef	USE_KMALLOC
+	if (dma != DMA_ADDR_INVALID) {
+		struct net2280_ep	*ep;
+
+		ep = container_of(_ep, struct net2280_ep, ep);
+		if (!_ep)
+			return;
+		dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
+	} else
+#endif
+		kfree (buf);
+}
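+
+/* For example (sketch only), a gadget driver reaches these through the
+ * generic wrappers:
+ *
+ *	buf = usb_ep_alloc_buffer (ep, len, &req->dma, GFP_KERNEL);
+ *
+ * which lets this driver hand back dma-coherent memory, so net2280_queue()
+ * can skip its per-request pci_map_single() fallback.
+ */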
+
+/*-------------------------------------------------------------------------*/
+
+/* load a packet into the fifo we use for usb IN transfers.
+ * works for all endpoints.
+ *
+ * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
+ * at a time, but this code is simpler because it knows it only writes
+ * one packet.  ep-a..ep-d should use dma instead.
+ */
+static void
+write_fifo (struct net2280_ep *ep, struct usb_request *req)
+{
+	struct net2280_ep_regs	__iomem *regs = ep->regs;
+	u8			*buf;
+	u32			tmp;
+	unsigned		count, total;
+
+	/* INVARIANT:  fifo is currently empty. (testable) */
+
+	if (req) {
+		buf = req->buf + req->actual;
+		prefetch (buf);
+		total = req->length - req->actual;
+	} else {
+		total = 0;
+		buf = NULL;
+	}
+
+	/* write just one packet at a time */
+	count = ep->ep.maxpacket;
+	if (count > total)	/* min() cannot be used on a bitfield */
+		count = total;
+
+	VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
+			ep->ep.name, count,
+			(count != ep->ep.maxpacket) ? " (short)" : "",
+			req);
+	while (count >= 4) {
+		/* NOTE be careful if you try to align these. fifo lines
+		 * should normally be full (4 bytes) and successive partial
+		 * lines are ok only in certain cases.
+		 */
+		tmp = get_unaligned ((u32 *)buf);
+		cpu_to_le32s (&tmp);
+		writel (tmp, &regs->ep_data);
+		buf += 4;
+		count -= 4;
+	}
+
+	/* last fifo entry is "short" unless we wrote a full packet.
+	 * also explicitly validate last word in (periodic) transfers
+	 * when maxpacket is not a multiple of 4 bytes.
+	 */
+	if (count || total < ep->ep.maxpacket) {
+		tmp = count ? get_unaligned ((u32 *)buf) : count;
+		cpu_to_le32s (&tmp);
+		set_fifo_bytecount (ep, count & 0x03);
+		writel (tmp, &regs->ep_data);
+	}
+
+	/* pci writes may still be posted */
+}
+
+/* work around erratum 0106: PCI and USB race over the OUT fifo.
+ * caller guarantees chiprev 0100, out endpoint is NAKing, and
+ * there's no real data in the fifo.
+ *
+ * NOTE:  also used in cases where that erratum doesn't apply:
+ * where the host wrote "too much" data to us.
+ */
+static void out_flush (struct net2280_ep *ep)
+{
+	u32	__iomem *statp;
+	u32	tmp;
+
+	ASSERT_OUT_NAKING (ep);
+
+	statp = &ep->regs->ep_stat;
+	writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+		, statp);
+	writel ((1 << FIFO_FLUSH), statp);
+	mb ();
+	tmp = readl (statp);
+	if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+			/* high speed did bulk NYET; fifo isn't filling */
+			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
+		unsigned	usec;
+
+		usec = 50;		/* 64 byte bulk/interrupt */
+		handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
+				(1 << USB_OUT_PING_NAK_SENT), usec);
+		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
+	}
+}
+
+/* unload packet(s) from the fifo we use for usb OUT transfers.
+ * returns true iff the request completed, because of short packet
+ * or the request buffer having filled with full packets.
+ *
+ * for ep-a..ep-d this will read multiple packets out when they
+ * have been accepted.
+ */
+static int
+read_fifo (struct net2280_ep *ep, struct net2280_request *req)
+{
+	struct net2280_ep_regs	__iomem *regs = ep->regs;
+	u8			*buf = req->req.buf + req->req.actual;
+	unsigned		count, tmp, is_short;
+	unsigned		cleanup = 0, prevent = 0;
+
+	/* erratum 0106 ... packets coming in during fifo reads might
+	 * be incompletely rejected.  not all cases have workarounds.
+	 */
+	if (ep->dev->chiprev == 0x0100
+			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
+		udelay (1);
+		tmp = readl (&ep->regs->ep_stat);
+		if ((tmp & (1 << NAK_OUT_PACKETS)))
+			cleanup = 1;
+		else if ((tmp & (1 << FIFO_FULL))) {
+			start_out_naking (ep);
+			prevent = 1;
+		}
+		/* else: hope we don't see the problem */
+	}
+
+	/* never overflow the rx buffer. the fifo reads packets until
+	 * it sees a short one; we might not be ready for them all.
+	 */
+	prefetchw (buf);
+	count = readl (&regs->ep_avail);
+	if (unlikely (count == 0)) {
+		udelay (1);
+		tmp = readl (&ep->regs->ep_stat);
+		count = readl (&regs->ep_avail);
+		/* handled that data already? */
+		if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
+			return 0;
+	}
+
+	tmp = req->req.length - req->req.actual;
+	if (count > tmp) {
+		/* as with DMA, data overflow gets flushed */
+		if ((tmp % ep->ep.maxpacket) != 0) {
+			ERROR (ep->dev,
+				"%s out fifo %d bytes, expected %d\n",
+				ep->ep.name, count, tmp);
+			req->req.status = -EOVERFLOW;
+			cleanup = 1;
+			/* NAK_OUT_PACKETS will be set, so flushing is safe;
+			 * the next read will start with the next packet
+			 */
+		} /* else it's a ZLP, no worries */
+		count = tmp;
+	}
+	req->req.actual += count;
+
+	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
+
+	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
+			ep->ep.name, count, is_short ? " (short)" : "",
+			cleanup ? " flush" : "", prevent ? " nak" : "",
+			req, req->req.actual, req->req.length);
+
+	while (count >= 4) {
+		tmp = readl (&regs->ep_data);
+		cpu_to_le32s (&tmp);
+		put_unaligned (tmp, (u32 *)buf);
+		buf += 4;
+		count -= 4;
+	}
+	if (count) {
+		tmp = readl (&regs->ep_data);
+		/* LE conversion is implicit here: */
+		do {
+			*buf++ = (u8) tmp;
+			tmp >>= 8;
+		} while (--count);
+	}
+	if (cleanup)
+		out_flush (ep);
+	if (prevent) {
+		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+		(void) readl (&ep->regs->ep_rsp);
+	}
+
+	return is_short || ((req->req.actual == req->req.length)
+				&& !req->req.zero);
+}
+
+/* fill out dma descriptor to match a given request */
+static void
+fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
+{
+	struct net2280_dma	*td = req->td;
+	u32			dmacount = req->req.length;
+
+	/* don't let DMA continue after a short OUT packet,
+	 * so overruns can't affect the next transfer.
+	 * in case of overruns on max-size packets, we can't
+	 * stop the fifo from filling but we can flush it.
+	 */
+	if (ep->is_in)
+		dmacount |= (1 << DMA_DIRECTION);
+	else if ((dmacount % ep->ep.maxpacket) != 0)
+		dmacount |= (1 << END_OF_CHAIN);
+
+	req->valid = valid;
+	if (valid)
+		dmacount |= (1 << VALID_BIT);
+	if (likely(!req->req.no_interrupt || !use_dma_chaining))
+		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);
+
+	/* td->dmadesc = previously set by caller */
+	td->dmaaddr = cpu_to_le32 (req->req.dma);
+
+	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
+	wmb ();
+	td->dmacount = cpu_to_le32p (&dmacount);
+}
+
+static const u32 dmactl_default =
+		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
+		| (1 << DMA_CLEAR_COUNT_ENABLE)
+		/* erratum 0116 workaround part 1 (use POLLING) */
+		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
+		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
+		| (1 << DMA_VALID_BIT_ENABLE)
+		| (1 << DMA_SCATTER_GATHER_ENABLE)
+		/* erratum 0116 workaround part 2 (no AUTOSTART) */
+		| (1 << DMA_ENABLE);
+
+static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
+{
+	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
+}
+
+static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
+{
+	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
+	spin_stop_dma (dma);
+}
+
+static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
+{
+	struct net2280_dma_regs	__iomem *dma = ep->dma;
+
+	writel ((1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION),
+			&dma->dmacount);
+	writel (readl (&dma->dmastat), &dma->dmastat);
+
+	writel (td_dma, &dma->dmadesc);
+	writel (dmactl, &dma->dmactl);
+
+	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
+	(void) readl (&ep->dev->pci->pcimstctl);
+
+	writel ((1 << DMA_START), &dma->dmastat);
+
+	if (!ep->is_in)
+		stop_out_naking (ep);
+}
+
+static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
+{
+	u32			tmp;
+	struct net2280_dma_regs	__iomem *dma = ep->dma;
+
+	/* FIXME can't use DMA for ZLPs */
+
+	/* on this path we "know" there's no dma active (yet) */
+	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
+	writel (0, &ep->dma->dmactl);
+
+	/* previous OUT packet might have been short */
+	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
+		 		& (1 << NAK_OUT_PACKETS)) != 0) {
+		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
+			&ep->regs->ep_stat);
+
+		tmp = readl (&ep->regs->ep_avail);
+		if (tmp) {
+			writel (readl (&dma->dmastat), &dma->dmastat);
+
+			/* transfer all/some fifo data */
+			writel (req->req.dma, &dma->dmaaddr);
+			tmp = min (tmp, req->req.length);
+
+			/* dma irq, faking scatterlist status */
+			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
+			writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
+				| tmp, &dma->dmacount);
+			req->td->dmadesc = 0;
+			req->valid = 1;
+
+			writel ((1 << DMA_ENABLE), &dma->dmactl);
+			writel ((1 << DMA_START), &dma->dmastat);
+			return;
+		}
+	}
+
+	tmp = dmactl_default;
+
+	/* force packet boundaries between dma requests, but prevent the
+	 * controller from automagically writing a last "short" packet
+	 * (zero length) unless the driver explicitly said to do that.
+	 */
+	if (ep->is_in) {
+		if (likely ((req->req.length % ep->ep.maxpacket) != 0
+				|| req->req.zero)) {
+			tmp |= (1 << DMA_FIFO_VALIDATE);
+			ep->in_fifo_validate = 1;
+		} else
+			ep->in_fifo_validate = 0;
+	}
+
+	/* init req->td, pointing to the current dummy */
+	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
+	fill_dma_desc (ep, req, 1);
+
+	if (!use_dma_chaining)
+		req->td->dmacount |= __constant_cpu_to_le32 (1 << END_OF_CHAIN);
+
+	start_queue (ep, tmp, req->td_dma);
+}
+
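+/* queue_dma() relies on a "dummy descriptor" scheme:  each DMA-capable
+ * endpoint keeps one spare descriptor (ep->dummy) as the tail of its
+ * hardware chain.  Queueing swaps the two, so the request's freshly
+ * allocated td becomes the new tail dummy while the old dummy, which the
+ * chain already links to, is filled in to describe this request.  The
+ * hardware therefore never follows a link into an unwritten descriptor.
+ */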
+static inline void
+queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
+{
+	struct net2280_dma	*end;
+	dma_addr_t		tmp;
+
+	/* swap new dummy for old, link; fill and maybe activate */
+	end = ep->dummy;
+	ep->dummy = req->td;
+	req->td = end;
+
+	tmp = ep->td_dma;
+	ep->td_dma = req->td_dma;
+	req->td_dma = tmp;
+
+	end->dmadesc = cpu_to_le32 (ep->td_dma);
+
+	fill_dma_desc (ep, req, valid);
+}
+
+static void
+done (struct net2280_ep *ep, struct net2280_request *req, int status)
+{
+	struct net2280		*dev;
+	unsigned		stopped = ep->stopped;
+
+	list_del_init (&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+	if (req->mapped) {
+		pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
+			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	}
+
+	if (status && status != -ESHUTDOWN)
+		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock (&dev->lock);
+	req->req.complete (&ep->ep, &req->req);
+	spin_lock (&dev->lock);
+	ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+net2280_queue (struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
+{
+	struct net2280_request	*req;
+	struct net2280_ep	*ep;
+	struct net2280		*dev;
+	unsigned long		flags;
+
+	/* we always require a cpu-view buffer, so that we can
+	 * always use pio (as fallback or whatever).
+	 */
+	req = container_of (_req, struct net2280_request, req);
+	if (!_req || !_req->complete || !_req->buf
+			|| !list_empty (&req->queue))
+		return -EINVAL;
+	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
+		return -EDOM;
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	/* FIXME implement PIO fallback for ZLPs with DMA */
+	if (ep->dma && _req->length == 0)
+		return -EOPNOTSUPP;
+
+	/* set up dma mapping in case the caller didn't */
+	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
+		_req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
+			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+		req->mapped = 1;
+	}
+
+#if 0
+	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
+			_ep->name, _req, _req->length, _req->buf);
+#endif
+
+	spin_lock_irqsave (&dev->lock, flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* kickstart this i/o queue? */
+	if (list_empty (&ep->queue) && !ep->stopped) {
+		/* use DMA if the endpoint supports it, else pio */
+		if (ep->dma)
+			start_dma (ep, req);
+		else {
+			/* maybe there's no control data, just status ack */
+			if (ep->num == 0 && _req->length == 0) {
+				allow_status (ep);
+				done (ep, req, 0);
+				VDEBUG (dev, "%s status ack\n", ep->ep.name);
+				goto done;
+			}
+
+			/* PIO ... stuff the fifo, or unblock it.  */
+			if (ep->is_in)
+				write_fifo (ep, _req);
+			else if (list_empty (&ep->queue)) {
+				u32	s;
+
+				/* OUT FIFO might have packet(s) buffered */
+				s = readl (&ep->regs->ep_stat);
+				if ((s & (1 << FIFO_EMPTY)) == 0) {
+					/* note:  _req->short_not_ok is
+					 * ignored here since PIO _always_
+					 * stops queue advance here, and
+					 * _req->status doesn't change for
+					 * short reads (only _req->actual)
+					 */
+					if (read_fifo (ep, req)) {
+						done (ep, req, 0);
+						if (ep->num == 0)
+							allow_status (ep);
+						/* don't queue it */
+						req = NULL;
+					} else
+						s = readl (&ep->regs->ep_stat);
+				}
+
+				/* don't NAK, let the fifo fill */
+				if (req && (s & (1 << NAK_OUT_PACKETS)))
+					writel ((1 << CLEAR_NAK_OUT_PACKETS),
+							&ep->regs->ep_rsp);
+			}
+		}
+
+	} else if (ep->dma) {
+		int	valid = 1;
+
+		if (ep->is_in) {
+			int	expect;
+
+			/* preventing magic zlps is per-engine state, not
+			 * per-transfer; irq logic must recover hiccups.
+			 */
+			expect = likely (req->req.zero
+				|| (req->req.length % ep->ep.maxpacket) != 0);
+			if (expect != ep->in_fifo_validate)
+				valid = 0;
+		}
+		queue_dma (ep, req, valid);
+
+	} /* else the irq handler advances the queue. */
+
+	if (req)
+		list_add_tail (&req->queue, &ep->queue);
+done:
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* pci writes may still be posted */
+	return 0;
+}
+
+static inline void
+dma_done (
+	struct net2280_ep *ep,
+	struct net2280_request *req,
+	u32 dmacount,
+	int status
+)
+{
+	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
+	done (ep, req, status);
+}
+
+static void restart_dma (struct net2280_ep *ep);
+
+static void scan_dma_completions (struct net2280_ep *ep)
+{
+	/* only look at descriptors that were "naturally" retired,
+	 * so fifo and list head state won't matter
+	 */
+	while (!list_empty (&ep->queue)) {
+		struct net2280_request	*req;
+		u32			tmp;
+
+		req = list_entry (ep->queue.next,
+				struct net2280_request, queue);
+		if (!req->valid)
+			break;
+		rmb ();
+		tmp = le32_to_cpup (&req->td->dmacount);
+		if ((tmp & (1 << VALID_BIT)) != 0)
+			break;
+
+		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
+		 * cases where DMA must be aborted; this code handles
+		 * all non-abort DMA completions.
+		 */
+		if (unlikely (req->td->dmadesc == 0)) {
+			/* paranoia */
+			tmp = readl (&ep->dma->dmacount);
+			if (tmp & DMA_BYTE_COUNT_MASK)
+				break;
+			/* single transfer mode */
+			dma_done (ep, req, tmp, 0);
+			break;
+		} else if (!ep->is_in
+				&& (req->req.length % ep->ep.maxpacket) != 0) {
+			tmp = readl (&ep->regs->ep_stat);
+
+			/* AVOID TROUBLE HERE by not issuing short reads from
+			 * your gadget driver.  That helps avoid errata 0121,
+			 * 0122, and 0124; not all cases trigger the warning.
+			 */
+			if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+				WARN (ep->dev, "%s lost packet sync!\n",
+						ep->ep.name);
+				req->req.status = -EOVERFLOW;
+			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
+				/* fifo gets flushed later */
+				ep->out_overflow = 1;
+				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
+						ep->ep.name, tmp,
+						req->req.length);
+				req->req.status = -EOVERFLOW;
+			}
+		}
+		dma_done (ep, req, tmp, 0);
+	}
+}
+
+static void restart_dma (struct net2280_ep *ep)
+{
+	struct net2280_request	*req;
+	u32			dmactl = dmactl_default;
+
+	if (ep->stopped)
+		return;
+	req = list_entry (ep->queue.next, struct net2280_request, queue);
+
+	if (!use_dma_chaining) {
+		start_dma (ep, req);
+		return;
+	}
+
+	/* the 2280 will be processing the queue unless queue hiccups after
+	 * the previous transfer:
+	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
+	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
+	 *  OUT:  was "usb-short", we must restart.
+	 */
+	if (ep->is_in && !req->valid) {
+		struct net2280_request	*entry, *prev = NULL;
+		int			reqmode, done = 0;
+
+		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
+		ep->in_fifo_validate = likely (req->req.zero
+			|| (req->req.length % ep->ep.maxpacket) != 0);
+		if (ep->in_fifo_validate)
+			dmactl |= (1 << DMA_FIFO_VALIDATE);
+		list_for_each_entry (entry, &ep->queue, queue) {
+			u32		dmacount;
+
+			if (entry == req)
+				continue;
+			dmacount = entry->td->dmacount;
+			if (!done) {
+				reqmode = likely (entry->req.zero
+					|| (entry->req.length
+						% ep->ep.maxpacket) != 0);
+				if (reqmode == ep->in_fifo_validate) {
+					entry->valid = 1;
+					dmacount |= valid_bit;
+					entry->td->dmacount = dmacount;
+					prev = entry;
+					continue;
+				} else {
+					/* force a hiccup */
+					prev->td->dmacount |= dma_done_ie;
+					done = 1;
+				}
+			}
+
+			/* walk the rest of the queue so unlinks behave */
+			entry->valid = 0;
+			dmacount &= ~valid_bit;
+			entry->td->dmacount = dmacount;
+			prev = entry;
+		}
+	}
+
+	writel (0, &ep->dma->dmactl);
+	start_queue (ep, dmactl, req->td_dma);
+}
+
+static void abort_dma (struct net2280_ep *ep)
+{
+	/* abort the current transfer */
+	if (likely (!list_empty (&ep->queue))) {
+		/* FIXME work around errata 0121, 0122, 0124 */
+		writel ((1 << DMA_ABORT), &ep->dma->dmastat);
+		spin_stop_dma (ep->dma);
+	} else
+		stop_dma (ep->dma);
+	scan_dma_completions (ep);
+}
+
+/* dequeue ALL requests */
+static void nuke (struct net2280_ep *ep)
+{
+	struct net2280_request	*req;
+
+	/* called with spinlock held */
+	ep->stopped = 1;
+	if (ep->dma)
+		abort_dma (ep);
+	while (!list_empty (&ep->queue)) {
+		req = list_entry (ep->queue.next,
+				struct net2280_request,
+				queue);
+		done (ep, req, -ESHUTDOWN);
+	}
+}
+
+/* dequeue JUST ONE request */
+static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct net2280_ep	*ep;
+	struct net2280_request	*req;
+	unsigned long		flags;
+	u32			dmactl;
+	int			stopped;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave (&ep->dev->lock, flags);
+	stopped = ep->stopped;
+
+	/* quiesce dma while we patch the queue */
+	dmactl = 0;
+	ep->stopped = 1;
+	if (ep->dma) {
+		dmactl = readl (&ep->dma->dmactl);
+		/* WARNING erratum 0127 may kick in ... */
+		stop_dma (ep->dma);
+		scan_dma_completions (ep);
+	}
+
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore (&ep->dev->lock, flags);
+		return -EINVAL;
+	}
+
+	/* queue head may be partially complete. */
+	if (ep->queue.next == &req->queue) {
+		if (ep->dma) {
+			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
+			_req->status = -ECONNRESET;
+			abort_dma (ep);
+			if (likely (ep->queue.next == &req->queue)) {
+				// NOTE: misreports single-transfer mode
+				req->td->dmacount = 0;	/* invalidate */
+				dma_done (ep, req,
+					readl (&ep->dma->dmacount),
+					-ECONNRESET);
+			}
+		} else {
+			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
+			done (ep, req, -ECONNRESET);
+		}
+		req = NULL;
+
+	/* patch up hardware chaining data */
+	} else if (ep->dma && use_dma_chaining) {
+		if (req->queue.prev == ep->queue.next) {
+			writel (le32_to_cpu (req->td->dmadesc),
+				&ep->dma->dmadesc);
+			if (req->td->dmacount & dma_done_ie)
+				writel (readl (&ep->dma->dmacount)
+						| dma_done_ie,
+					&ep->dma->dmacount);
+		} else {
+			struct net2280_request	*prev;
+
+			prev = list_entry (req->queue.prev,
+				struct net2280_request, queue);
+			prev->td->dmadesc = req->td->dmadesc;
+			if (req->td->dmacount & dma_done_ie)
+				prev->td->dmacount |= dma_done_ie;
+		}
+	}
+
+	if (req)
+		done (ep, req, -ECONNRESET);
+	ep->stopped = stopped;
+
+	if (ep->dma) {
+		/* turn off dma on inactive queues */
+		if (list_empty (&ep->queue))
+			stop_dma (ep->dma);
+		else if (!ep->stopped) {
+			/* resume current request, or start new one */
+			if (req)
+				writel (dmactl, &ep->dma->dmactl);
+			else
+				start_dma (ep, list_entry (ep->queue.next,
+					struct net2280_request, queue));
+		}
+	}
+
+	spin_unlock_irqrestore (&ep->dev->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int net2280_fifo_status (struct usb_ep *_ep);
+
+static int
+net2280_set_halt (struct usb_ep *_ep, int value)
+{
+	struct net2280_ep	*ep;
+	unsigned long		flags;
+	int			retval = 0;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
+						== USB_ENDPOINT_XFER_ISOC)
+		return -EINVAL;
+
+	spin_lock_irqsave (&ep->dev->lock, flags);
+	if (!list_empty (&ep->queue))
+		retval = -EAGAIN;
+	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
+		retval = -EAGAIN;
+	else {
+		VDEBUG (ep->dev, "%s %s halt\n", _ep->name,
+				value ? "set" : "clear");
+		/* set/clear, then synch memory views with the device */
+		if (value) {
+			if (ep->num == 0)
+				ep->dev->protocol_stall = 1;
+			else
+				set_halt (ep);
+		} else
+			clear_halt (ep);
+		(void) readl (&ep->regs->ep_rsp);
+	}
+	spin_unlock_irqrestore (&ep->dev->lock, flags);
+
+	return retval;
+}
+
+static int
+net2280_fifo_status (struct usb_ep *_ep)
+{
+	struct net2280_ep	*ep;
+	u32			avail;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -ENODEV;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
+	if (avail > ep->fifo_size)
+		return -EOVERFLOW;
+	if (ep->is_in)
+		avail = ep->fifo_size - avail;
+	return avail;
+}
+
+static void
+net2280_fifo_flush (struct usb_ep *_ep)
+{
+	struct net2280_ep	*ep;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return;
+
+	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
+	(void) readl (&ep->regs->ep_rsp);
+}
+
+static struct usb_ep_ops net2280_ep_ops = {
+	.enable		= net2280_enable,
+	.disable	= net2280_disable,
+
+	.alloc_request	= net2280_alloc_request,
+	.free_request	= net2280_free_request,
+
+	.alloc_buffer	= net2280_alloc_buffer,
+	.free_buffer	= net2280_free_buffer,
+
+	.queue		= net2280_queue,
+	.dequeue	= net2280_dequeue,
+
+	.set_halt	= net2280_set_halt,
+	.fifo_status	= net2280_fifo_status,
+	.fifo_flush	= net2280_fifo_flush,
+};
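+
+/* these ops back the usb_ep calls made by gadget drivers.  As a rough
+ * sketch (gadget side, error handling omitted), a function driver that
+ * has claimed one of "ep-a".."ep-f" typically does
+ *
+ *	usb_ep_enable (ep, desc);
+ *	req = usb_ep_alloc_request (ep, GFP_ATOMIC);
+ *	req->buf = buf;  req->length = len;  req->complete = my_complete;
+ *	usb_ep_queue (ep, req, GFP_ATOMIC);
+ *
+ * where my_complete is that driver's completion callback; each call lands
+ * in the corresponding net2280_* routine above.
+ */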
+
+/*-------------------------------------------------------------------------*/
+
+static int net2280_get_frame (struct usb_gadget *_gadget)
+{
+	struct net2280		*dev;
+	unsigned long		flags;
+	u16			retval;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of (_gadget, struct net2280, gadget);
+	spin_lock_irqsave (&dev->lock, flags);
+	retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
+	spin_unlock_irqrestore (&dev->lock, flags);
+	return retval;
+}
+
+static int net2280_wakeup (struct usb_gadget *_gadget)
+{
+	struct net2280		*dev;
+	u32			tmp;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return 0;
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	tmp = readl (&dev->usb->usbctl);
+	if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
+		writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* pci writes may still be posted */
+	return 0;
+}
+
+static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
+{
+	struct net2280		*dev;
+	u32			tmp;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return 0;
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	tmp = readl (&dev->usb->usbctl);
+	if (value)
+		tmp |= (1 << SELF_POWERED_STATUS);
+	else
+		tmp &= ~(1 << SELF_POWERED_STATUS);
+	writel (tmp, &dev->usb->usbctl);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	return 0;
+}
+
+static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
+{
+	struct net2280  *dev;
+	u32             tmp;
+	unsigned long   flags;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	tmp = readl (&dev->usb->usbctl);
+	dev->softconnect = (is_on != 0);
+	if (is_on)
+		tmp |= (1 << USB_DETECT_ENABLE);
+	else
+		tmp &= ~(1 << USB_DETECT_ENABLE);
+	writel (tmp, &dev->usb->usbctl);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	return 0;
+}
+
+static const struct usb_gadget_ops net2280_ops = {
+	.get_frame	= net2280_get_frame,
+	.wakeup		= net2280_wakeup,
+	.set_selfpowered = net2280_set_selfpowered,
+	.pullup		= net2280_pullup,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+
+/* FIXME move these into procfs, and use seq_file.
+ * Sysfs _still_ doesn't behave for arbitrarily sized files,
+ * and also doesn't help products using this with 2.4 kernels.
+ */
+
+/* "function" sysfs attribute */
+static ssize_t
+show_function (struct device *_dev, char *buf)
+{
+	struct net2280	*dev = dev_get_drvdata (_dev);
+
+	if (!dev->driver
+			|| !dev->driver->function
+			|| strlen (dev->driver->function) > PAGE_SIZE)
+		return 0;
+	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
+
+static ssize_t
+show_registers (struct device *_dev, char *buf)
+{
+	struct net2280		*dev;
+	char			*next;
+	unsigned		size, t;
+	unsigned long		flags;
+	int			i;
+	u32			t1, t2;
+	char			*s;
+
+	dev = dev_get_drvdata (_dev);
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave (&dev->lock, flags);
+
+	if (dev->driver)
+		s = dev->driver->driver.name;
+	else
+		s = "(none)";
+
+	/* Main Control Registers */
+	t = scnprintf (next, size, "%s version " DRIVER_VERSION
+			", chiprev %04x, dma %s\n\n"
+			"devinit %03x fifoctl %08x gadget '%s'\n"
+			"pci irqenb0 %02x irqenb1 %08x "
+			"irqstat0 %04x irqstat1 %08x\n",
+			driver_name, dev->chiprev,
+			use_dma
+				? (use_dma_chaining ? "chaining" : "enabled")
+				: "disabled",
+			readl (&dev->regs->devinit),
+			readl (&dev->regs->fifoctl),
+			s,
+			readl (&dev->regs->pciirqenb0),
+			readl (&dev->regs->pciirqenb1),
+			readl (&dev->regs->irqstat0),
+			readl (&dev->regs->irqstat1));
+	size -= t;
+	next += t;
+
+	/* USB Control Registers */
+	t1 = readl (&dev->usb->usbctl);
+	t2 = readl (&dev->usb->usbstat);
+	if (t1 & (1 << VBUS_PIN)) {
+		if (t2 & (1 << HIGH_SPEED))
+			s = "high speed";
+		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+			s = "powered";
+		else
+			s = "full speed";
+		/* full speed bit (6) not working?? */
+	} else
+		s = "not attached";
+	t = scnprintf (next, size,
+			"stdrsp %08x usbctl %08x usbstat %08x "
+				"addr 0x%02x (%s)\n",
+			readl (&dev->usb->stdrsp), t1, t2,
+			readl (&dev->usb->ouraddr), s);
+	size -= t;
+	next += t;
+
+	/* PCI Master Control Registers */
+
+	/* DMA Control Registers */
+
+	/* Configurable EP Control Registers */
+	for (i = 0; i < 7; i++) {
+		struct net2280_ep	*ep;
+
+		ep = &dev->ep [i];
+		if (i && !ep->desc)
+			continue;
+
+		t1 = readl (&ep->regs->ep_cfg);
+		t2 = readl (&ep->regs->ep_rsp) & 0xff;
+		t = scnprintf (next, size,
+				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
+					"irqenb %02x\n",
+				ep->ep.name, t1, t2,
+				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
+					? "NAK " : "",
+				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
+					? "hide " : "",
+				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
+					? "CRC " : "",
+				(t2 & (1 << CLEAR_INTERRUPT_MODE))
+					? "interrupt " : "",
+				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
+					? "status " : "",
+				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
+					? "NAKmode " : "",
+				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
+					? "DATA1 " : "DATA0 ",
+				(t2 & (1 << CLEAR_ENDPOINT_HALT))
+					? "HALT " : "",
+				readl (&ep->regs->ep_irqenb));
+		size -= t;
+		next += t;
+
+		t = scnprintf (next, size,
+				"\tstat %08x avail %04x "
+				"(ep%d%s-%s)%s\n",
+				readl (&ep->regs->ep_stat),
+				readl (&ep->regs->ep_avail),
+				t1 & 0x0f, DIR_STRING (t1),
+				type_string (t1 >> 8),
+				ep->stopped ? "*" : "");
+		size -= t;
+		next += t;
+
+		if (!ep->dma)
+			continue;
+
+		t = scnprintf (next, size,
+				"  dma\tctl %08x stat %08x count %08x\n"
+				"\taddr %08x desc %08x\n",
+				readl (&ep->dma->dmactl),
+				readl (&ep->dma->dmastat),
+				readl (&ep->dma->dmacount),
+				readl (&ep->dma->dmaaddr),
+				readl (&ep->dma->dmadesc));
+		size -= t;
+		next += t;
+
+	}
+
+	/* Indexed Registers */
+		// none yet 
+
+	/* Statistics */
+	t = scnprintf (next, size, "\nirqs:  ");
+	size -= t;
+	next += t;
+	for (i = 0; i < 7; i++) {
+		struct net2280_ep	*ep;
+
+		ep = &dev->ep [i];
+		if (i && !ep->irqs)
+			continue;
+		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
+		size -= t;
+		next += t;
+
+	}
+	t = scnprintf (next, size, "\n");
+	size -= t;
+	next += t;
+
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
+
+static ssize_t
+show_queues (struct device *_dev, char *buf)
+{
+	struct net2280		*dev;
+	char			*next;
+	unsigned		size;
+	unsigned long		flags;
+	int			i;
+
+	dev = dev_get_drvdata (_dev);
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave (&dev->lock, flags);
+
+	for (i = 0; i < 7; i++) {
+		struct net2280_ep		*ep = &dev->ep [i];
+		struct net2280_request		*req;
+		int				t;
+
+		if (i != 0) {
+			const struct usb_endpoint_descriptor	*d;
+
+			d = ep->desc;
+			if (!d)
+				continue;
+			t = d->bEndpointAddress;
+			t = scnprintf (next, size,
+				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
+				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
+				(t & USB_DIR_IN) ? "in" : "out",
+				({ char *val;
+				 switch (d->bmAttributes & 0x03) {
+				 case USB_ENDPOINT_XFER_BULK:
+				 	val = "bulk"; break;
+				 case USB_ENDPOINT_XFER_INT:
+				 	val = "intr"; break;
+				 default:
+				 	val = "iso"; break;
+				 }; val; }),
+				le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
+				ep->dma ? "dma" : "pio", ep->fifo_size
+				);
+		} else /* ep0 should only have one transfer queued */
+			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
+					ep->is_in ? "in" : "out");
+		if (t <= 0 || t > size)
+			goto done;
+		size -= t;
+		next += t;
+
+		if (list_empty (&ep->queue)) {
+			t = scnprintf (next, size, "\t(nothing queued)\n");
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+			continue;
+		}
+		list_for_each_entry (req, &ep->queue, queue) {
+			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
+				t = scnprintf (next, size,
+					"\treq %p len %d/%d "
+					"buf %p (dmacount %08x)\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf,
+					readl (&ep->dma->dmacount));
+			else
+				t = scnprintf (next, size,
+					"\treq %p len %d/%d buf %p\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf);
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+
+			if (ep->dma) {
+				struct net2280_dma	*td;
+
+				td = req->td;
+				t = scnprintf (next, size, "\t    td %08x "
+					" count %08x buf %08x desc %08x\n",
+					(u32) req->td_dma,
+					le32_to_cpu (td->dmacount),
+					le32_to_cpu (td->dmaaddr),
+					le32_to_cpu (td->dmadesc));
+				if (t <= 0 || t > size)
+					goto done;
+				size -= t;
+				next += t;
+			}
+		}
+	}
+
+done:
+	spin_unlock_irqrestore (&dev->lock, flags);
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
+
+
+#else
+
+#define device_create_file(a,b)	do {} while (0)
+#define device_remove_file	device_create_file
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* another driver-specific mode might be a request type doing dma
+ * to/from another device fifo instead of to/from memory.
+ */
+
+static void set_fifo_mode (struct net2280 *dev, int mode)
+{
+	/* keeping high bits preserves BAR2 */
+	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
+
+	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
+	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
+	switch (mode) {
+	case 0:
+		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
+		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
+		break;
+	case 1:
+		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
+		break;
+	case 2:
+		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep [1].fifo_size = 2048;
+		dev->ep [2].fifo_size = 1024;
+		break;
+	}
+	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
+	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
+	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
+}
+
+/**
+ * net2280_set_fifo_mode - change allocation of fifo buffers
+ * @gadget: access to the net2280 device that will be updated
+ * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
+ * 	1 for two 2kB buffers (ep-a and ep-b only);
+ * 	2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
+ *
+ * returns zero on success, else negative errno.  when this succeeds,
+ * the contents of gadget->ep_list may have changed.
+ *
+ * you may only call this function when endpoints a-d are all disabled.
+ * use it whenever extra hardware buffering can help performance, such
+ * as before enabling "high bandwidth" interrupt endpoints that use
+ * maxpacket bigger than 512 (when double buffering would otherwise
+ * be unavailable).
+ */
+int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
+{
+	int			i;
+	struct net2280		*dev;
+	int			status = 0;
+	unsigned long		flags;
+
+	if (!gadget)
+		return -ENODEV;
+	dev = container_of (gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+
+	for (i = 1; i <= 4; i++)
+		if (dev->ep [i].desc) {
+			status = -EINVAL;
+			break;
+		}
+	if (mode < 0 || mode > 2)
+		status = -EINVAL;
+	if (status == 0)
+		set_fifo_mode (dev, mode);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	if (status == 0) {
+		if (mode == 1)
+			DEBUG (dev, "fifo:  ep-a 2K, ep-b 2K\n");
+		else if (mode == 2)
+			DEBUG (dev, "fifo:  ep-a 2K, ep-b 1K, ep-c 1K\n");
+		/* else all are 1K */
+	}
+	return status;
+}
+EXPORT_SYMBOL (net2280_set_fifo_mode);
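+
+/* a usage sketch:  a gadget driver wanting the larger buffers might, from
+ * its bind() callback and before enabling ep-a..ep-d, do something like
+ *
+ *	if (strcmp (gadget->name, "net2280") == 0)
+ *		net2280_set_fifo_mode (gadget, 1);
+ *
+ * the strcmp() here just stands in for however that driver recognizes
+ * this controller.
+ */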
+
+/*-------------------------------------------------------------------------*/
+
+/* keeping it simple:
+ * - one bus driver, initted first;
+ * - one function driver, initted second
+ *
+ * most of the work to support multiple net2280 controllers would
+ * be to associate this gadget driver (yes?) with all of them, or
+ * perhaps to bind specific drivers to specific devices.
+ */
+
+static struct net2280	*the_controller;
+
+static void usb_reset (struct net2280 *dev)
+{
+	u32	tmp;
+
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	(void) readl (&dev->usb->usbctl);
+
+	net2280_led_init (dev);
+
+	/* disable automatic responses, and irqs */
+	writel (0, &dev->usb->stdrsp);
+	writel (0, &dev->regs->pciirqenb0);
+	writel (0, &dev->regs->pciirqenb1);
+
+	/* clear old dma and irq state */
+	for (tmp = 0; tmp < 4; tmp++) {
+		struct net2280_ep	*ep = &dev->ep [tmp + 1];
+
+		if (ep->dma)
+			abort_dma (ep);
+	}
+	writel (~0, &dev->regs->irqstat0);
+	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
+
+	/* reset, and enable pci */
+	tmp = readl (&dev->regs->devinit)
+		| (1 << PCI_ENABLE)
+		| (1 << FIFO_SOFT_RESET)
+		| (1 << USB_SOFT_RESET)
+		| (1 << M8051_RESET);
+	writel (tmp, &dev->regs->devinit);
+
+	/* standard fifo and endpoint allocations */
+	set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
+}
+
+static void usb_reinit (struct net2280 *dev)
+{
+	u32	tmp;
+	int	init_dma;
+
+	/* use_dma changes are ignored till next device re-init */
+	init_dma = use_dma;
+
+	/* basic endpoint init */
+	for (tmp = 0; tmp < 7; tmp++) {
+		struct net2280_ep	*ep = &dev->ep [tmp];
+
+		ep->ep.name = ep_name [tmp];
+		ep->dev = dev;
+		ep->num = tmp;
+
+		if (tmp > 0 && tmp <= 4) {
+			ep->fifo_size = 1024;
+			if (init_dma)
+				ep->dma = &dev->dma [tmp - 1];
+		} else
+			ep->fifo_size = 64;
+		ep->regs = &dev->epregs [tmp];
+		ep_reset (dev->regs, ep);
+	}
+	dev->ep [0].ep.maxpacket = 64;
+	dev->ep [5].ep.maxpacket = 64;
+	dev->ep [6].ep.maxpacket = 64;
+
+	dev->gadget.ep0 = &dev->ep [0].ep;
+	dev->ep [0].stopped = 0;
+	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+
+	/* we want to prevent lowlevel/insecure access from the USB host,
+	 * but erratum 0119 means this enable bit is ignored
+	 */
+	for (tmp = 0; tmp < 5; tmp++)
+		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
+}
+
+static void ep0_start (struct net2280 *dev)
+{
+	writel (  (1 << CLEAR_EP_HIDE_STATUS_PHASE)
+		| (1 << CLEAR_NAK_OUT_PACKETS)
+		| (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
+		, &dev->epregs [0].ep_rsp);
+
+	/*
+	 * hardware optionally handles a bunch of standard requests
+	 * that the API hides from drivers anyway.  have it do so.
+	 * endpoint status/features are handled in software, to
+	 * help pass tests for some dubious behavior.
+	 */
+	writel (  (1 << SET_TEST_MODE)
+		| (1 << SET_ADDRESS)
+		| (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
+		| (1 << GET_DEVICE_STATUS)
+		| (1 << GET_INTERFACE_STATUS)
+		, &dev->usb->stdrsp);
+	writel (  (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
+		| (1 << SELF_POWERED_USB_DEVICE)
+		| (1 << REMOTE_WAKEUP_SUPPORT)
+		| (dev->softconnect << USB_DETECT_ENABLE)
+		| (1 << SELF_POWERED_STATUS)
+		, &dev->usb->usbctl);
+
+	/* enable irqs so we can see ep0 and general operation  */
+	writel (  (1 << SETUP_PACKET_INTERRUPT_ENABLE)
+		| (1 << ENDPOINT_0_INTERRUPT_ENABLE)
+		, &dev->regs->pciirqenb0);
+	writel (  (1 << PCI_INTERRUPT_ENABLE)
+		| (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
+		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
+		| (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
+		| (1 << VBUS_INTERRUPT_ENABLE)
+		| (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
+		| (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
+		, &dev->regs->pciirqenb1);
+
+	/* don't leave any writes posted */
+	(void) readl (&dev->usb->usbctl);
+}
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+int usb_gadget_register_driver (struct usb_gadget_driver *driver)
+{
+	struct net2280		*dev = the_controller;
+	int			retval;
+	unsigned		i;
+
+	/* insist on high speed support from the driver, since
+	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
+	 * "must not be used in normal operation"
+	 */
+	if (!driver
+			|| driver->speed != USB_SPEED_HIGH
+			|| !driver->bind
+			|| !driver->unbind
+			|| !driver->setup)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	for (i = 0; i < 7; i++)
+		dev->ep [i].irqs = 0;
+
+	/* hook up the driver ... */
+	dev->softconnect = 1;
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+	retval = driver->bind (&dev->gadget);
+	if (retval) {
+		DEBUG (dev, "bind to driver %s --> %d\n",
+				driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	device_create_file (&dev->pdev->dev, &dev_attr_function);
+	device_create_file (&dev->pdev->dev, &dev_attr_queues);
+
+	/* ... then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	net2280_led_active (dev, 1);
+	ep0_start (dev);
+
+	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
+			driver->driver.name,
+			readl (&dev->usb->usbctl),
+			readl (&dev->usb->stdrsp));
+
+	/* pci writes may still be posted */
+	return 0;
+}
+EXPORT_SYMBOL (usb_gadget_register_driver);
+
+static void
+stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
+{
+	int			i;
+
+	/* don't disconnect if it's not connected */
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* stop hardware; prevent new request submissions;
+	 * and kill any outstanding requests.
+	 */
+	usb_reset (dev);
+	for (i = 0; i < 7; i++)
+		nuke (&dev->ep [i]);
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock (&dev->lock);
+		driver->disconnect (&dev->gadget);
+		spin_lock (&dev->lock);
+	}
+
+	usb_reinit (dev);
+}
+
+int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+{
+	struct net2280	*dev = the_controller;
+	unsigned long	flags;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver)
+		return -EINVAL;
+
+	spin_lock_irqsave (&dev->lock, flags);
+	stop_activity (dev, driver);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	net2280_pullup (&dev->gadget, 0);
+
+	driver->unbind (&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	net2280_led_active (dev, 0);
+	device_remove_file (&dev->pdev->dev, &dev_attr_function);
+	device_remove_file (&dev->pdev->dev, &dev_attr_queues);
+
+	DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL (usb_gadget_unregister_driver);
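+
+/* illustrative only: a gadget driver module of this era binds and
+ * unbinds itself with the two calls above from its own module init
+ * and exit.  my_driver is a hypothetical struct usb_gadget_driver,
+ * not something defined in this file.
+ */
+#if 0
+static int __init my_gadget_init (void)
+{
+	return usb_gadget_register_driver (&my_driver);
+}
+module_init (my_gadget_init);
+
+static void __exit my_gadget_exit (void)
+{
+	usb_gadget_unregister_driver (&my_driver);
+}
+module_exit (my_gadget_exit);
+#endif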
+
+
+/*-------------------------------------------------------------------------*/
+
+/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
+ * also works for dma-capable endpoints, in pio mode or just
+ * to manually advance the queue after short OUT transfers.
+ */
+static void handle_ep_small (struct net2280_ep *ep)
+{
+	struct net2280_request	*req;
+	u32			t;
+	/* 0 error, 1 mid-data, 2 done */
+	int			mode = 1;
+
+	if (!list_empty (&ep->queue))
+		req = list_entry (ep->queue.next,
+			struct net2280_request, queue);
+	else
+		req = NULL;
+
+	/* ack all, and handle what we care about */
+	t = readl (&ep->regs->ep_stat);
+	ep->irqs++;
+#if 0
+	VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
+			ep->ep.name, t, req ? &req->req : 0);
+#endif
+	writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
+
+	/* for ep0, monitor token irqs to catch data stage length errors
+	 * and to synchronize on status.
+	 *
+	 * also, to defer reporting of protocol stalls ... here's where
+	 * data or status first appears, so handling stalls here should
+	 * never cause trouble on the host side.
+	 *
+	 * control requests could be slightly faster without token synch for
+	 * status, but status can jam up that way.
+	 */
+	if (unlikely (ep->num == 0)) {
+		if (ep->is_in) {
+			/* status; stop NAKing */
+			if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
+				if (ep->dev->protocol_stall) {
+					ep->stopped = 1;
+					set_halt (ep);
+				}
+				if (!req)
+					allow_status (ep);
+				mode = 2;
+			/* reply to extra IN data tokens with a zlp */
+			} else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
+				if (ep->dev->protocol_stall) {
+					ep->stopped = 1;
+					set_halt (ep);
+					mode = 2;
+				} else if (!req && ep->stopped)
+					write_fifo (ep, NULL);
+			}
+		} else {
+			/* status; stop NAKing */
+			if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
+				if (ep->dev->protocol_stall) {
+					ep->stopped = 1;
+					set_halt (ep);
+				}
+				mode = 2;
+			/* an extra OUT token is an error */
+			} else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
+					&& req
+					&& req->req.actual == req->req.length)
+					|| !req) {
+				ep->dev->protocol_stall = 1;
+				set_halt (ep);
+				ep->stopped = 1;
+				if (req)
+					done (ep, req, -EOVERFLOW);
+				req = NULL;
+			}
+		}
+	}
+
+	if (unlikely (!req))
+		return;
+
+	/* manual DMA queue advance after short OUT */
+	if (likely (ep->dma != 0)) {
+		if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+			u32	count;
+			int	stopped = ep->stopped;
+
+			/* TRANSFERRED works around OUT_DONE erratum 0112.
+			 * we expect (N <= maxpacket) bytes; host wrote M.
+			 * iff (M < N) we won't ever see a DMA interrupt.
+			 */
+			ep->stopped = 1;
+			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {
+
+				/* any preceding dma transfers must finish.
+				 * dma handles (M >= N), may empty the queue
+				 */
+				scan_dma_completions (ep);
+				if (unlikely (list_empty (&ep->queue)
+						|| ep->out_overflow)) {
+					req = NULL;
+					break;
+				}
+				req = list_entry (ep->queue.next,
+					struct net2280_request, queue);
+
+				/* here either (M < N), a "real" short rx;
+				 * or (M == N) and the queue didn't empty
+				 */
+				if (likely (t & (1 << FIFO_EMPTY))) {
+					count = readl (&ep->dma->dmacount);
+					count &= DMA_BYTE_COUNT_MASK;
+					if (readl (&ep->dma->dmadesc)
+							!= req->td_dma)
+						req = NULL;
+					break;
+				}
+				udelay(1);
+			}
+
+			/* stop DMA, leave ep NAKing */
+			writel ((1 << DMA_ABORT), &ep->dma->dmastat);
+			spin_stop_dma (ep->dma);
+
+			if (likely (req)) {
+				req->td->dmacount = 0;
+				t = readl (&ep->regs->ep_avail);
+				dma_done (ep, req, count, t);
+			}
+
+			/* also flush to prevent erratum 0106 trouble */
+			if (unlikely (ep->out_overflow
+					|| (ep->dev->chiprev == 0x0100
+						&& ep->dev->gadget.speed
+							== USB_SPEED_FULL))) {
+				out_flush (ep);
+				ep->out_overflow = 0;
+			}
+
+			/* (re)start dma if needed, stop NAKing */
+			ep->stopped = stopped;
+			if (!list_empty (&ep->queue))
+				restart_dma (ep);
+		} else
+			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
+					ep->ep.name, t);
+		return;
+
+	/* data packet(s) received (in the fifo, OUT) */
+	} else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
+		if (read_fifo (ep, req) && ep->num != 0)
+			mode = 2;
+
+	/* data packet(s) transmitted (IN) */
+	} else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
+		unsigned	len;
+
+		len = req->req.length - req->req.actual;
+		if (len > ep->ep.maxpacket)
+			len = ep->ep.maxpacket;
+		req->req.actual += len;
+
+		/* if we wrote it all, we're usually done */
+		if (req->req.actual == req->req.length) {
+			if (ep->num == 0) {
+				/* wait for control status */
+				if (mode != 2)
+					req = NULL;
+			} else if (!req->req.zero || len != ep->ep.maxpacket)
+				mode = 2;
+		}
+
+	/* there was nothing to do ...  */
+	} else if (mode == 1)
+		return;
+
+	/* done */
+	if (mode == 2) {
+		/* stream endpoints often resubmit/unlink in completion */
+		done (ep, req, 0);
+
+		/* maybe advance queue to next request */
+		if (ep->num == 0) {
+			/* NOTE:  net2280 could let gadget driver start the
+			 * status stage later. since not all controllers let
+			 * them control that, the api doesn't (yet) allow it.
+			 */
+			if (!ep->stopped)
+				allow_status (ep);
+			req = NULL;
+		} else {
+			if (!list_empty (&ep->queue) && !ep->stopped)
+				req = list_entry (ep->queue.next,
+					struct net2280_request, queue);
+			else
+				req = NULL;
+			if (req && !ep->is_in)
+				stop_out_naking (ep);
+		}
+	}
+
+	/* is there a buffer for the next packet?
+	 * for best streaming performance, make sure there is one.
+	 */
+	if (req && !ep->stopped) {
+
+		/* load IN fifo with next packet (may be zlp) */
+		if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
+			write_fifo (ep, &req->req);
+	}
+}
+
+static struct net2280_ep *
+get_ep_by_addr (struct net2280 *dev, u16 wIndex)
+{
+	struct net2280_ep	*ep;
+
+	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return &dev->ep [0];
+	list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
+		u8	bEndpointAddress;
+
+		if (!ep->desc)
+			continue;
+		bEndpointAddress = ep->desc->bEndpointAddress;
+		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+			return ep;
+	}
+	return NULL;
+}
+
+static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
+{
+	struct net2280_ep	*ep;
+	u32			num, scratch;
+
+	/* most of these don't need individual acks */
+	stat &= ~(1 << INTA_ASSERTED);
+	if (!stat)
+		return;
+	// DEBUG (dev, "irqstat0 %04x\n", stat);
+
+	/* starting a control request? */
+	if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
+		union {
+			u32			raw [2];
+			struct usb_ctrlrequest	r;
+		} u;
+		int				tmp = 0;
+		struct net2280_request		*req;
+
+		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
+			if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
+				dev->gadget.speed = USB_SPEED_HIGH;
+			else
+				dev->gadget.speed = USB_SPEED_FULL;
+			net2280_led_speed (dev, dev->gadget.speed);
+			DEBUG (dev, "%s speed\n",
+				(dev->gadget.speed == USB_SPEED_HIGH)
+					? "high" : "full");
+		}
+
+		ep = &dev->ep [0];
+		ep->irqs++;
+
+		/* make sure any leftover request state is cleared */
+		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
+		while (!list_empty (&ep->queue)) {
+			req = list_entry (ep->queue.next,
+					struct net2280_request, queue);
+			done (ep, req, (req->req.actual == req->req.length)
+						? 0 : -EPROTO);
+		}
+		ep->stopped = 0;
+		dev->protocol_stall = 0;
+		writel (  (1 << TIMEOUT)
+			| (1 << USB_STALL_SENT)
+			| (1 << USB_IN_NAK_SENT)
+			| (1 << USB_IN_ACK_RCVD)
+			| (1 << USB_OUT_PING_NAK_SENT)
+			| (1 << USB_OUT_ACK_SENT)
+			| (1 << FIFO_OVERFLOW)
+			| (1 << FIFO_UNDERFLOW)
+			| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
+			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
+			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+			| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+			| (1 << DATA_IN_TOKEN_INTERRUPT)
+			, &ep->regs->ep_stat);
+		u.raw [0] = readl (&dev->usb->setup0123);
+		u.raw [1] = readl (&dev->usb->setup4567);
+		
+		cpu_to_le32s (&u.raw [0]);
+		cpu_to_le32s (&u.raw [1]);
+
+		le16_to_cpus (&u.r.wValue);
+		le16_to_cpus (&u.r.wIndex);
+		le16_to_cpus (&u.r.wLength);
+
+		/* ack the irq */
+		writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
+		stat ^= (1 << SETUP_PACKET_INTERRUPT);
+
+		/* watch control traffic at the token level, and force
+		 * synchronization before letting the status stage happen.
+		 * FIXME ignore tokens we'll NAK, until the driver responds.
+		 * that'll mean a lot fewer irqs for some drivers.
+		 */
+		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+		if (ep->is_in) {
+			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+				| (1 << DATA_IN_TOKEN_INTERRUPT);
+			stop_out_naking (ep);
+		} else
+			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+				| (1 << DATA_IN_TOKEN_INTERRUPT);
+		writel (scratch, &dev->epregs [0].ep_irqenb);
+
+		/* we made the hardware handle most lowlevel requests;
+		 * everything else goes uplevel to the gadget code.
+		 */
+		switch (u.r.bRequest) {
+		case USB_REQ_GET_STATUS: {
+			struct net2280_ep	*e;
+			u16			status;
+
+			/* hw handles device and interface status */
+			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
+				goto delegate;
+			if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0
+					|| u.r.wLength > 2)
+				goto do_stall;
+
+			if (readl (&e->regs->ep_rsp)
+					& (1 << SET_ENDPOINT_HALT))
+				status = __constant_cpu_to_le16 (1);
+			else
+				status = __constant_cpu_to_le16 (0);
+
+			/* don't bother with a request object! */
+			writel (0, &dev->epregs [0].ep_irqenb);
+			set_fifo_bytecount (ep, u.r.wLength);
+			writel (status, &dev->epregs [0].ep_data);
+			allow_status (ep);
+			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
+			goto next_endpoints;
+			}
+			break;
+		case USB_REQ_CLEAR_FEATURE: {
+			struct net2280_ep	*e;
+
+			/* hw handles device features */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (u.r.wValue != USB_ENDPOINT_HALT
+					|| u.r.wLength != 0)
+				goto do_stall;
+			if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
+				goto do_stall;
+			clear_halt (e);
+			allow_status (ep);
+			VDEBUG (dev, "%s clear halt\n", ep->ep.name);
+			goto next_endpoints;
+			}
+			break;
+		case USB_REQ_SET_FEATURE: {
+			struct net2280_ep	*e;
+
+			/* hw handles device features */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (u.r.wValue != USB_ENDPOINT_HALT
+					|| u.r.wLength != 0)
+				goto do_stall;
+			if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
+				goto do_stall;
+			set_halt (e);
+			allow_status (ep);
+			VDEBUG (dev, "%s set halt\n", ep->ep.name);
+			goto next_endpoints;
+			}
+			break;
+		default:
+delegate:
+			VDEBUG (dev, "setup %02x.%02x v%04x i%04x "
+				"ep_cfg %08x\n",
+				u.r.bRequestType, u.r.bRequest,
+				u.r.wValue, u.r.wIndex,
+				readl (&ep->regs->ep_cfg));
+			spin_unlock (&dev->lock);
+			tmp = dev->driver->setup (&dev->gadget, &u.r);
+			spin_lock (&dev->lock);
+		}
+
+		/* stall ep0 on error */
+		if (tmp < 0) {
+do_stall:
+			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
+					u.r.bRequestType, u.r.bRequest, tmp);
+			dev->protocol_stall = 1;
+		}
+
+		/* some in/out token irq should follow; maybe stall then.
+		 * driver must queue a request (even zlp) or halt ep0
+		 * before the host times out.
+		 */
+	}
+
+next_endpoints:
+	/* endpoint data irq ? */
+	scratch = stat & 0x7f;
+	stat &= ~0x7f;
+	for (num = 0; scratch; num++) {
+		u32		t;
+
+		/* do this endpoint's FIFO and queue need tending? */
+		t = 1 << num;
+		if ((scratch & t) == 0)
+			continue;
+		scratch ^= t;
+
+		ep = &dev->ep [num];
+		handle_ep_small (ep);
+	}
+
+	if (stat)
+		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
+}
+
+#define DMA_INTERRUPTS ( \
+		  (1 << DMA_D_INTERRUPT) \
+		| (1 << DMA_C_INTERRUPT) \
+		| (1 << DMA_B_INTERRUPT) \
+		| (1 << DMA_A_INTERRUPT))
+#define	PCI_ERROR_INTERRUPTS ( \
+		  (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
+		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
+		| (1 << PCI_RETRY_ABORT_INTERRUPT))
+
+static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
+{
+	struct net2280_ep	*ep;
+	u32			tmp, num, mask, scratch;
+
+	/* after disconnect there's nothing else to do! */
+	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
+	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);
+
+	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
+	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
+	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
+	 * only indicates a change in the reset state).
+	 */
+	if (stat & tmp) {
+		writel (tmp, &dev->regs->irqstat1);
+		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT))
+					&& ((readl (&dev->usb->usbstat) & mask) == 0))
+				|| ((readl (&dev->usb->usbctl) & (1 << VBUS_PIN)) == 0))
+				&& (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
+			DEBUG (dev, "disconnect %s\n",
+					dev->driver->driver.name);
+			stop_activity (dev, dev->driver);
+			ep0_start (dev);
+			return;
+		}
+		stat &= ~tmp;
+
+		/* VBUS can bounce ... one of many reasons to ignore the
+		 * notion of hotplug events on bus connect/disconnect!
+		 */
+		if (!stat)
+			return;
+	}
+
+	/* NOTE: chip stays in PCI D0 state for now, but it could
+	 * enter D1 to save more power
+	 */
+	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
+	if (stat & tmp) {
+		writel (tmp, &dev->regs->irqstat1);
+		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
+			if (dev->driver->suspend)
+				dev->driver->suspend (&dev->gadget);
+			if (!enable_suspend)
+				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
+		} else {
+			if (dev->driver->resume)
+				dev->driver->resume (&dev->gadget);
+			/* at high speed, note erratum 0133 */
+		}
+		stat &= ~tmp;
+	}
+
+	/* clear any other status/irqs */
+	if (stat)
+		writel (stat, &dev->regs->irqstat1);
+
+	/* some status we can just ignore */
+	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+			| (1 << SUSPEND_REQUEST_INTERRUPT)
+			| (1 << RESUME_INTERRUPT)
+			| (1 << SOF_INTERRUPT));
+	if (!stat)
+		return;
+	// DEBUG (dev, "irqstat1 %08x\n", stat);
+
+	/* DMA status, for ep-{a,b,c,d} */
+	scratch = stat & DMA_INTERRUPTS;
+	stat &= ~DMA_INTERRUPTS;
+	scratch >>= 9;
+	for (num = 0; scratch; num++) {
+		struct net2280_dma_regs	__iomem *dma;
+
+		tmp = 1 << num;
+		if ((tmp & scratch) == 0)
+			continue;
+		scratch ^= tmp;
+
+		ep = &dev->ep [num + 1];
+		dma = ep->dma;
+
+		if (!dma)
+			continue;
+
+		/* clear ep's dma status */
+		tmp = readl (&dma->dmastat);
+		writel (tmp, &dma->dmastat);
+
+		/* chaining should stop on abort, short OUT from fifo,
+		 * or (stat0 codepath) short OUT transfer.
+		 */
+		if (!use_dma_chaining) {
+			if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
+					== 0) {
+				DEBUG (ep->dev, "%s no xact done? %08x\n",
+					ep->ep.name, tmp);
+				continue;
+			}
+			stop_dma (ep->dma);
+		}
+
+		/* OUT transfers terminate when the data from the
+		 * host is in our memory.  Process whatever's done.
+		 * On this path, we know the transfer's last packet wasn't
+		 * less than req->length.  NAK_OUT_PACKETS may be set,
+		 * or the FIFO may already be holding new packets.
+		 *
+		 * IN transfers can linger in the FIFO for a very
+		 * long time ... we ignore that for now; accounting
+		 * precisely (like PIO does) needs per-packet irqs.
+		 */
+		scan_dma_completions (ep);
+
+		/* disable dma on inactive queues; else maybe restart */
+		if (list_empty (&ep->queue)) {
+			if (use_dma_chaining)
+				stop_dma (ep->dma);
+		} else {
+			tmp = readl (&dma->dmactl);
+			if (!use_dma_chaining
+					|| (tmp & (1 << DMA_ENABLE)) == 0)
+				restart_dma (ep);
+			else if (ep->is_in && use_dma_chaining) {
+				struct net2280_request	*req;
+				u32			dmacount;
+
+				/* the descriptor at the head of the chain
+				 * may still have VALID_BIT clear; that's
+				 * used to trigger changing DMA_FIFO_VALIDATE
+				 * (affects automagic zlp writes).
+				 */
+				req = list_entry (ep->queue.next,
+						struct net2280_request, queue);
+				dmacount = req->td->dmacount;
+				dmacount &= __constant_cpu_to_le32 (
+						(1 << VALID_BIT)
+						| DMA_BYTE_COUNT_MASK);
+				if (dmacount && (dmacount & valid_bit) == 0)
+					restart_dma (ep);
+			}
+		}
+		ep->irqs++;
+	}
+
+	/* NOTE:  there are other PCI errors we might usefully notice.
+	 * if they appear very often, here's where to try recovering.
+	 */
+	if (stat & PCI_ERROR_INTERRUPTS) {
+		ERROR (dev, "pci dma error; stat %08x\n", stat);
+		stat &= ~PCI_ERROR_INTERRUPTS;
+		/* these are fatal errors, but "maybe" they won't
+		 * happen again ...
+		 */
+		stop_activity (dev, dev->driver);
+		ep0_start (dev);
+		stat = 0;
+	}
+
+	if (stat)
+		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
+}
+
+static irqreturn_t net2280_irq (int irq, void *_dev, struct pt_regs * r)
+{
+	struct net2280		*dev = _dev;
+
+	spin_lock (&dev->lock);
+
+	/* handle disconnect, dma, and more */
+	handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
+
+	/* control requests and PIO */
+	handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
+
+	spin_unlock (&dev->lock);
+
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void gadget_release (struct device *_dev)
+{
+	struct net2280	*dev = dev_get_drvdata (_dev);
+
+	kfree (dev);
+}
+
+/* tear down the binding between this driver and the pci device */
+
+static void net2280_remove (struct pci_dev *pdev)
+{
+	struct net2280		*dev = pci_get_drvdata (pdev);
+
+	/* start with the driver above us */
+	if (dev->driver) {
+		/* should have been done already by driver model core */
+		WARN (dev, "pci remove, driver '%s' is still registered\n",
+				dev->driver->driver.name);
+		usb_gadget_unregister_driver (dev->driver);
+	}
+
+	/* then clean up the resources we allocated during probe() */
+	net2280_led_shutdown (dev);
+	if (dev->requests) {
+		int		i;
+		for (i = 1; i < 5; i++) {
+			if (!dev->ep [i].dummy)
+				continue;
+			pci_pool_free (dev->requests, dev->ep [i].dummy,
+					dev->ep [i].td_dma);
+		}
+		pci_pool_destroy (dev->requests);
+	}
+	if (dev->got_irq)
+		free_irq (pdev->irq, dev);
+	if (dev->regs)
+		iounmap (dev->regs);
+	if (dev->region)
+		release_mem_region (pci_resource_start (pdev, 0),
+				pci_resource_len (pdev, 0));
+	if (dev->enabled)
+		pci_disable_device (pdev);
+	device_unregister (&dev->gadget.dev);
+	device_remove_file (&pdev->dev, &dev_attr_registers);
+	pci_set_drvdata (pdev, NULL);
+
+	INFO (dev, "unbind\n");
+
+	the_controller = NULL;
+}
+
+/* wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+
+static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct net2280		*dev;
+	unsigned long		resource, len;
+	void			__iomem *base = NULL;
+	int			retval, i;
+	char			buf [8], *bufp;
+
+	/* if you want to support more than one controller in a system,
+	 * usb_gadget_{register,unregister}_driver() must change.
+	 */
+	if (the_controller) {
+		dev_warn (&pdev->dev, "ignoring\n");
+		return -EBUSY;
+	}
+
+	/* alloc, and start init */
+	dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+	if (dev == NULL) {
+		retval = -ENOMEM;
+		goto done;
+	}
+
+	memset (dev, 0, sizeof *dev);
+	spin_lock_init (&dev->lock);
+	dev->pdev = pdev;
+	dev->gadget.ops = &net2280_ops;
+	dev->gadget.is_dualspeed = 1;
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	strcpy (dev->gadget.dev.bus_id, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = driver_name;
+
+	/* now all the pci goodies ... */
+	if (pci_enable_device (pdev) < 0) {
+		retval = -ENODEV;
+		goto done;
+	}
+	dev->enabled = 1;
+
+	/* BAR 0 holds all the registers
+	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
+	 * BAR 2 is fifo memory; unused here
+	 */
+	resource = pci_resource_start (pdev, 0);
+	len = pci_resource_len (pdev, 0);
+	if (!request_mem_region (resource, len, driver_name)) {
+		DEBUG (dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto done;
+	}
+	dev->region = 1;
+
+	base = ioremap_nocache (resource, len);
+	if (base == NULL) {
+		DEBUG (dev, "can't map memory\n");
+		retval = -EFAULT;
+		goto done;
+	}
+	dev->regs = (struct net2280_regs __iomem *) base;
+	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
+	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
+	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
+	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
+	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
+
+	/* put into initial config, link up all endpoints */
+	writel (0, &dev->usb->usbctl);
+	usb_reset (dev);
+	usb_reinit (dev);
+
+	/* irq setup after old hardware is cleaned up */
+	if (!pdev->irq) {
+		ERROR (dev, "No IRQ.  Check PCI setup!\n");
+		retval = -ENODEV;
+		goto done;
+	}
+#ifndef __sparc__
+	scnprintf (buf, sizeof buf, "%d", pdev->irq);
+	bufp = buf;
+#else
+	bufp = __irq_itoa(pdev->irq);
+#endif
+	if (request_irq (pdev->irq, net2280_irq, SA_SHIRQ, driver_name, dev)
+			!= 0) {
+		ERROR (dev, "request interrupt %s failed\n", bufp);
+		retval = -EBUSY;
+		goto done;
+	}
+	dev->got_irq = 1;
+
+	/* DMA setup */
+	/* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
+	dev->requests = pci_pool_create ("requests", pdev,
+		sizeof (struct net2280_dma),
+		0 /* no alignment requirements */,
+		0 /* or page-crossing issues */);
+	if (!dev->requests) {
+		DEBUG (dev, "can't get request pool\n");
+		retval = -ENOMEM;
+		goto done;
+	}
+	for (i = 1; i < 5; i++) {
+		struct net2280_dma	*td;
+
+		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
+				&dev->ep [i].td_dma);
+		if (!td) {
+			DEBUG (dev, "can't get dummy %d\n", i);
+			retval = -ENOMEM;
+			goto done;
+		}
+		td->dmacount = 0;	/* not VALID */
+		td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
+		td->dmadesc = td->dmaaddr;
+		dev->ep [i].dummy = td;
+	}
+
+	/* enable lower-overhead pci memory bursts during DMA */
+	writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
+			// 256 write retries may not be enough...
+			// | (1 << PCI_RETRY_ABORT_ENABLE)
+			| (1 << DMA_READ_MULTIPLE_ENABLE)
+			| (1 << DMA_READ_LINE_ENABLE)
+			, &dev->pci->pcimstctl);
+	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
+	pci_set_master (pdev);
+	pci_set_mwi (pdev);
+
+	/* ... also flushes any posted pci writes */
+	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
+
+	/* done */
+	pci_set_drvdata (pdev, dev);
+	INFO (dev, "%s\n", driver_desc);
+	INFO (dev, "irq %s, pci mem %p, chip rev %04x\n",
+			bufp, base, dev->chiprev);
+	INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
+			use_dma
+				? (use_dma_chaining ? "chaining" : "enabled")
+				: "disabled");
+	the_controller = dev;
+
+	device_register (&dev->gadget.dev);
+	device_create_file (&pdev->dev, &dev_attr_registers);
+
+	return 0;
+
+done:
+	if (dev)
+		net2280_remove (pdev);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static struct pci_device_id pci_ids [] = { {
+	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask = 	~0,
+	.vendor =	0x17cc,
+	.device =	0x2280,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+
+}, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE (pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver net2280_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	net2280_probe,
+	.remove =	net2280_remove,
+
+	/* FIXME add power management support */
+};
+
+MODULE_DESCRIPTION (DRIVER_DESC);
+MODULE_AUTHOR ("David Brownell");
+MODULE_LICENSE ("GPL");
+
+static int __init init (void)
+{
+	if (!use_dma)
+		use_dma_chaining = 0;
+	return pci_register_driver (&net2280_pci_driver);
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	pci_unregister_driver (&net2280_pci_driver);
+}
+module_exit (cleanup);
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
new file mode 100644
index 0000000..fff4509
--- /dev/null
+++ b/drivers/usb/gadget/net2280.h
@@ -0,0 +1,728 @@
+/*
+ * NetChip 2280 high/full speed USB device controller.
+ * Unlike many such controllers, this one talks PCI.
+ */
+
+/*
+ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* NET2280 MEMORY MAPPED REGISTERS
+ *
+ * The register layout came from the chip documentation, and the bit
+ * number definitions were extracted from chip specification.
+ *
+ * Use the shift operator ('<<') to build bit masks, with readl/writel
+ * to access the registers through PCI.
+ */
+
+/* main registers, BAR0 + 0x0000 */
+struct net2280_regs {
+	// offset 0x0000
+	u32		devinit;
+#define     LOCAL_CLOCK_FREQUENCY                               8
+#define     FORCE_PCI_RESET                                     7
+#define     PCI_ID                                              6
+#define     PCI_ENABLE                                          5
+#define     FIFO_SOFT_RESET                                     4
+#define     CFG_SOFT_RESET                                      3
+#define     PCI_SOFT_RESET                                      2
+#define     USB_SOFT_RESET                                      1
+#define     M8051_RESET                                         0
+	u32		eectl;
+#define     EEPROM_ADDRESS_WIDTH                                23
+#define     EEPROM_CHIP_SELECT_ACTIVE                           22
+#define     EEPROM_PRESENT                                      21
+#define     EEPROM_VALID                                        20
+#define     EEPROM_BUSY                                         19
+#define     EEPROM_CHIP_SELECT_ENABLE                           18
+#define     EEPROM_BYTE_READ_START                              17
+#define     EEPROM_BYTE_WRITE_START                             16
+#define     EEPROM_READ_DATA                                    8
+#define     EEPROM_WRITE_DATA                                   0
+	u32		eeclkfreq;
+	u32		_unused0;
+	// offset 0x0010
+
+	u32		pciirqenb0;		/* interrupt PCI master ... */
+#define     SETUP_PACKET_INTERRUPT_ENABLE                       7
+#define     ENDPOINT_F_INTERRUPT_ENABLE                         6
+#define     ENDPOINT_E_INTERRUPT_ENABLE                         5
+#define     ENDPOINT_D_INTERRUPT_ENABLE                         4
+#define     ENDPOINT_C_INTERRUPT_ENABLE                         3
+#define     ENDPOINT_B_INTERRUPT_ENABLE                         2
+#define     ENDPOINT_A_INTERRUPT_ENABLE                         1
+#define     ENDPOINT_0_INTERRUPT_ENABLE                         0
+	u32		pciirqenb1;
+#define     PCI_INTERRUPT_ENABLE                                31
+#define     POWER_STATE_CHANGE_INTERRUPT_ENABLE                 27
+#define     PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE                26
+#define     PCI_PARITY_ERROR_INTERRUPT_ENABLE                   25
+#define     PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE          20
+#define     PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE          19
+#define     PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE          18
+#define     PCI_RETRY_ABORT_INTERRUPT_ENABLE                    17
+#define     PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE              16
+#define     GPIO_INTERRUPT_ENABLE                               13
+#define     DMA_D_INTERRUPT_ENABLE                              12
+#define     DMA_C_INTERRUPT_ENABLE                              11
+#define     DMA_B_INTERRUPT_ENABLE                              10
+#define     DMA_A_INTERRUPT_ENABLE                              9
+#define     EEPROM_DONE_INTERRUPT_ENABLE                        8
+#define     VBUS_INTERRUPT_ENABLE                               7
+#define     CONTROL_STATUS_INTERRUPT_ENABLE                     6
+#define     ROOT_PORT_RESET_INTERRUPT_ENABLE                    4
+#define     SUSPEND_REQUEST_INTERRUPT_ENABLE                    3
+#define     SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE             2
+#define     RESUME_INTERRUPT_ENABLE                             1
+#define     SOF_INTERRUPT_ENABLE                                0
+	u32		cpu_irqenb0;		/* ... or onboard 8051 */
+#define     SETUP_PACKET_INTERRUPT_ENABLE                       7
+#define     ENDPOINT_F_INTERRUPT_ENABLE                         6
+#define     ENDPOINT_E_INTERRUPT_ENABLE                         5
+#define     ENDPOINT_D_INTERRUPT_ENABLE                         4
+#define     ENDPOINT_C_INTERRUPT_ENABLE                         3
+#define     ENDPOINT_B_INTERRUPT_ENABLE                         2
+#define     ENDPOINT_A_INTERRUPT_ENABLE                         1
+#define     ENDPOINT_0_INTERRUPT_ENABLE                         0
+	u32		cpu_irqenb1;
+#define     CPU_INTERRUPT_ENABLE                                31
+#define     POWER_STATE_CHANGE_INTERRUPT_ENABLE                 27
+#define     PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE                26
+#define     PCI_PARITY_ERROR_INTERRUPT_ENABLE                   25
+#define     PCI_INTA_INTERRUPT_ENABLE                           24
+#define     PCI_PME_INTERRUPT_ENABLE                            23
+#define     PCI_SERR_INTERRUPT_ENABLE                           22
+#define     PCI_PERR_INTERRUPT_ENABLE                           21
+#define     PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE          20
+#define     PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE          19
+#define     PCI_RETRY_ABORT_INTERRUPT_ENABLE                    17
+#define     PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE              16
+#define     GPIO_INTERRUPT_ENABLE                               13
+#define     DMA_D_INTERRUPT_ENABLE                              12
+#define     DMA_C_INTERRUPT_ENABLE                              11
+#define     DMA_B_INTERRUPT_ENABLE                              10
+#define     DMA_A_INTERRUPT_ENABLE                              9
+#define     EEPROM_DONE_INTERRUPT_ENABLE                        8
+#define     VBUS_INTERRUPT_ENABLE                               7
+#define     CONTROL_STATUS_INTERRUPT_ENABLE                     6
+#define     ROOT_PORT_RESET_INTERRUPT_ENABLE                    4
+#define     SUSPEND_REQUEST_INTERRUPT_ENABLE                    3
+#define     SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE             2
+#define     RESUME_INTERRUPT_ENABLE                             1
+#define     SOF_INTERRUPT_ENABLE                                0
+
+	// offset 0x0020
+	u32		_unused1;
+	u32		usbirqenb1;
+#define     USB_INTERRUPT_ENABLE                                31
+#define     POWER_STATE_CHANGE_INTERRUPT_ENABLE                 27
+#define     PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE                26
+#define     PCI_PARITY_ERROR_INTERRUPT_ENABLE                   25
+#define     PCI_INTA_INTERRUPT_ENABLE                           24
+#define     PCI_PME_INTERRUPT_ENABLE                            23
+#define     PCI_SERR_INTERRUPT_ENABLE                           22
+#define     PCI_PERR_INTERRUPT_ENABLE                           21
+#define     PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE          20
+#define     PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE          19
+#define     PCI_RETRY_ABORT_INTERRUPT_ENABLE                    17
+#define     PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE              16
+#define     GPIO_INTERRUPT_ENABLE                               13
+#define     DMA_D_INTERRUPT_ENABLE                              12
+#define     DMA_C_INTERRUPT_ENABLE                              11
+#define     DMA_B_INTERRUPT_ENABLE                              10
+#define     DMA_A_INTERRUPT_ENABLE                              9
+#define     EEPROM_DONE_INTERRUPT_ENABLE                        8
+#define     VBUS_INTERRUPT_ENABLE                               7
+#define     CONTROL_STATUS_INTERRUPT_ENABLE                     6
+#define     ROOT_PORT_RESET_INTERRUPT_ENABLE                    4
+#define     SUSPEND_REQUEST_INTERRUPT_ENABLE                    3
+#define     SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE             2
+#define     RESUME_INTERRUPT_ENABLE                             1
+#define     SOF_INTERRUPT_ENABLE                                0
+	u32		irqstat0;
+#define     INTA_ASSERTED                                       12
+#define     SETUP_PACKET_INTERRUPT                              7
+#define     ENDPOINT_F_INTERRUPT                                6
+#define     ENDPOINT_E_INTERRUPT                                5
+#define     ENDPOINT_D_INTERRUPT                                4
+#define     ENDPOINT_C_INTERRUPT                                3
+#define     ENDPOINT_B_INTERRUPT                                2
+#define     ENDPOINT_A_INTERRUPT                                1
+#define     ENDPOINT_0_INTERRUPT                                0
+	u32		irqstat1;
+#define     POWER_STATE_CHANGE_INTERRUPT                        27
+#define     PCI_ARBITER_TIMEOUT_INTERRUPT                       26
+#define     PCI_PARITY_ERROR_INTERRUPT                          25
+#define     PCI_INTA_INTERRUPT                                  24
+#define     PCI_PME_INTERRUPT                                   23
+#define     PCI_SERR_INTERRUPT                                  22
+#define     PCI_PERR_INTERRUPT                                  21
+#define     PCI_MASTER_ABORT_RECEIVED_INTERRUPT                 20
+#define     PCI_TARGET_ABORT_RECEIVED_INTERRUPT                 19
+#define     PCI_RETRY_ABORT_INTERRUPT                           17
+#define     PCI_MASTER_CYCLE_DONE_INTERRUPT                     16
+#define     GPIO_INTERRUPT                                      13
+#define     DMA_D_INTERRUPT                                     12
+#define     DMA_C_INTERRUPT                                     11
+#define     DMA_B_INTERRUPT                                     10
+#define     DMA_A_INTERRUPT                                     9
+#define     EEPROM_DONE_INTERRUPT                               8
+#define     VBUS_INTERRUPT                                      7
+#define     CONTROL_STATUS_INTERRUPT                            6
+#define     ROOT_PORT_RESET_INTERRUPT                           4
+#define     SUSPEND_REQUEST_INTERRUPT                           3
+#define     SUSPEND_REQUEST_CHANGE_INTERRUPT                    2
+#define     RESUME_INTERRUPT                                    1
+#define     SOF_INTERRUPT                                       0
+	// offset 0x0030
+	u32		idxaddr;
+	u32		idxdata;
+	u32		fifoctl;
+#define     PCI_BASE2_RANGE                                     16
+#define     IGNORE_FIFO_AVAILABILITY                            3
+#define     PCI_BASE2_SELECT                                    2
+#define     FIFO_CONFIGURATION_SELECT                           0
+	u32		_unused2;
+	// offset 0x0040
+	u32		memaddr;
+#define     START                                               28
+#define     DIRECTION                                           27
+#define     FIFO_DIAGNOSTIC_SELECT                              24
+#define     MEMORY_ADDRESS                                      0
+	u32		memdata0;
+	u32		memdata1;
+	u32		_unused3;
+	// offset 0x0050
+	u32		gpioctl;
+#define     GPIO3_LED_SELECT                                    12
+#define     GPIO3_INTERRUPT_ENABLE                              11
+#define     GPIO2_INTERRUPT_ENABLE                              10
+#define     GPIO1_INTERRUPT_ENABLE                              9
+#define     GPIO0_INTERRUPT_ENABLE                              8
+#define     GPIO3_OUTPUT_ENABLE                                 7
+#define     GPIO2_OUTPUT_ENABLE                                 6
+#define     GPIO1_OUTPUT_ENABLE                                 5
+#define     GPIO0_OUTPUT_ENABLE                                 4
+#define     GPIO3_DATA                                          3
+#define     GPIO2_DATA                                          2
+#define     GPIO1_DATA                                          1
+#define     GPIO0_DATA                                          0
+	u32		gpiostat;
+#define     GPIO3_INTERRUPT                                     3
+#define     GPIO2_INTERRUPT                                     2
+#define     GPIO1_INTERRUPT                                     1
+#define     GPIO0_INTERRUPT                                     0
+} __attribute__ ((packed));
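+
+/* illustrative only: the bit numbers above are turned into masks with
+ * '<<' and applied through readl/writel.  this sketch mirrors what
+ * usb_reset() in net2280.c does with devinit; it is shown here purely
+ * as a usage example of the register layout.
+ */
+#if 0
+static inline void example_soft_reset (struct net2280_regs __iomem *regs)
+{
+	u32	tmp = readl (&regs->devinit);
+
+	tmp |= (1 << PCI_ENABLE)
+		| (1 << FIFO_SOFT_RESET)
+		| (1 << USB_SOFT_RESET)
+		| (1 << M8051_RESET);
+	writel (tmp, &regs->devinit);
+}
+#endif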
+
+/* usb control, BAR0 + 0x0080 */
+struct net2280_usb_regs {
+	// offset 0x0080
+	u32		stdrsp;
+#define     STALL_UNSUPPORTED_REQUESTS                          31
+#define     SET_TEST_MODE                                       16
+#define     GET_OTHER_SPEED_CONFIGURATION                       15
+#define     GET_DEVICE_QUALIFIER                                14
+#define     SET_ADDRESS                                         13
+#define     ENDPOINT_SET_CLEAR_HALT                             12
+#define     DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP               11
+#define     GET_STRING_DESCRIPTOR_2                             10
+#define     GET_STRING_DESCRIPTOR_1                             9
+#define     GET_STRING_DESCRIPTOR_0                             8
+#define     GET_SET_INTERFACE                                   6
+#define     GET_SET_CONFIGURATION                               5
+#define     GET_CONFIGURATION_DESCRIPTOR                        4
+#define     GET_DEVICE_DESCRIPTOR                               3
+#define     GET_ENDPOINT_STATUS                                 2
+#define     GET_INTERFACE_STATUS                                1
+#define     GET_DEVICE_STATUS                                   0
+	u32		prodvendid;
+#define     PRODUCT_ID                                          16
+#define     VENDOR_ID                                           0
+	u32		relnum;
+	u32		usbctl;
+#define     SERIAL_NUMBER_INDEX                                 16
+#define     PRODUCT_ID_STRING_ENABLE                            13
+#define     VENDOR_ID_STRING_ENABLE                             12
+#define     USB_ROOT_PORT_WAKEUP_ENABLE                         11
+#define     VBUS_PIN                                            10
+#define     TIMED_DISCONNECT                                    9
+#define     SUSPEND_IMMEDIATELY                                 7
+#define     SELF_POWERED_USB_DEVICE                             6
+#define     REMOTE_WAKEUP_SUPPORT                               5
+#define     PME_POLARITY                                        4
+#define     USB_DETECT_ENABLE                                   3
+#define     PME_WAKEUP_ENABLE                                   2
+#define     DEVICE_REMOTE_WAKEUP_ENABLE                         1
+#define     SELF_POWERED_STATUS                                 0
+	// offset 0x0090
+	u32		usbstat;
+#define     HIGH_SPEED                                          7
+#define     FULL_SPEED                                          6
+#define     GENERATE_RESUME                                     5
+#define     GENERATE_DEVICE_REMOTE_WAKEUP                       4
+	u32		xcvrdiag;
+#define     FORCE_HIGH_SPEED_MODE                               31
+#define     FORCE_FULL_SPEED_MODE                               30
+#define     USB_TEST_MODE                                       24
+#define     LINE_STATE                                          16
+#define     TRANSCEIVER_OPERATION_MODE                          2
+#define     TRANSCEIVER_SELECT                                  1
+#define     TERMINATION_SELECT                                  0
+	u32		setup0123;
+	u32		setup4567;
+	// offset 0x00a0
+	u32		_unused0;
+	u32		ouraddr;
+#define     FORCE_IMMEDIATE                                     7
+#define     OUR_USB_ADDRESS                                     0
+	u32		ourconfig;
+} __attribute__ ((packed));
+
+/* pci control, BAR0 + 0x0100 */
+struct net2280_pci_regs {
+	// offset 0x0100
+	u32		 pcimstctl;
+#define     PCI_ARBITER_PARK_SELECT                             13
+#define     PCI_MULTI_LEVEL_ARBITER                             12
+#define     PCI_RETRY_ABORT_ENABLE                              11
+#define     DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE              10
+#define     DMA_READ_MULTIPLE_ENABLE                            9
+#define     DMA_READ_LINE_ENABLE                                8
+#define     PCI_MASTER_COMMAND_SELECT                           6
+#define         MEM_READ_OR_WRITE                                   0
+#define         IO_READ_OR_WRITE                                    1
+#define         CFG_READ_OR_WRITE                                   2
+#define     PCI_MASTER_START                                    5
+#define     PCI_MASTER_READ_WRITE                               4
+#define         PCI_MASTER_WRITE                                    0
+#define         PCI_MASTER_READ                                     1
+#define     PCI_MASTER_BYTE_WRITE_ENABLES                       0
+	u32		 pcimstaddr;
+	u32		 pcimstdata;
+	u32		 pcimststat;
+#define     PCI_ARBITER_CLEAR                                   2
+#define     PCI_EXTERNAL_ARBITER                                1
+#define     PCI_HOST_MODE                                       0
+} __attribute__ ((packed));
+
+/* dma control, BAR0 + 0x0180 ... array of four structs like this,
+ * for channels 0..3.  see also struct net2280_dma:  descriptor
+ * that can be loaded into some of these registers.
+ */
+struct net2280_dma_regs {	/* [11.7] */
+	// offset 0x0180, 0x01a0, 0x01c0, 0x01e0, 
+	u32		dmactl;
+#define     DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE            25
+#define     DMA_CLEAR_COUNT_ENABLE                              21
+#define     DESCRIPTOR_POLLING_RATE                             19
+#define         POLL_CONTINUOUS                                     0
+#define         POLL_1_USEC                                         1
+#define         POLL_100_USEC                                       2
+#define         POLL_1_MSEC                                         3
+#define     DMA_VALID_BIT_POLLING_ENABLE                        18
+#define     DMA_VALID_BIT_ENABLE                                17
+#define     DMA_SCATTER_GATHER_ENABLE                           16
+#define     DMA_OUT_AUTO_START_ENABLE                           4
+#define     DMA_PREEMPT_ENABLE                                  3
+#define     DMA_FIFO_VALIDATE                                   2
+#define     DMA_ENABLE                                          1
+#define     DMA_ADDRESS_HOLD                                    0
+	u32		dmastat;
+#define     DMA_SCATTER_GATHER_DONE_INTERRUPT                   25
+#define     DMA_TRANSACTION_DONE_INTERRUPT                      24
+#define     DMA_ABORT                                           1
+#define     DMA_START                                           0
+	u32		_unused0 [2];
+	// offset 0x0190, 0x01b0, 0x01d0, 0x01f0, 
+	u32		dmacount;
+#define     VALID_BIT                                           31
+#define     DMA_DIRECTION                                       30
+#define     DMA_DONE_INTERRUPT_ENABLE                           29
+#define     END_OF_CHAIN                                        28
+#define         DMA_BYTE_COUNT_MASK                                 ((1<<24)-1)
+#define     DMA_BYTE_COUNT                                      0
+	u32		dmaaddr;
+	u32		dmadesc;
+	u32		_unused1;
+} __attribute__ ((packed));
+
+/* dedicated endpoint registers, BAR0 + 0x0200 */
+
+struct net2280_dep_regs {	/* [11.8] */
+	// offset 0x0200, 0x0210, 0x0220, 0x0230, 0x0240
+	u32		dep_cfg;
+	// offset 0x0204, 0x0214, 0x0224, 0x0234, 0x0244
+	u32		dep_rsp;
+	u32		_unused [2];
+} __attribute__ ((packed));
+
+/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
+ * like this, for ep0 then the configurable endpoints A..F
+ * ep0 reserved for control; E and F have only 64 bytes of fifo
+ */
+struct net2280_ep_regs {	/* [11.9] */
+	// offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0
+	u32		ep_cfg;
+#define     ENDPOINT_BYTE_COUNT                                 16
+#define     ENDPOINT_ENABLE                                     10
+#define     ENDPOINT_TYPE                                       8
+#define     ENDPOINT_DIRECTION                                  7
+#define     ENDPOINT_NUMBER                                     0
+	u32		ep_rsp;
+#define     SET_NAK_OUT_PACKETS                                 15
+#define     SET_EP_HIDE_STATUS_PHASE                            14
+#define     SET_EP_FORCE_CRC_ERROR                              13
+#define     SET_INTERRUPT_MODE                                  12
+#define     SET_CONTROL_STATUS_PHASE_HANDSHAKE                  11
+#define     SET_NAK_OUT_PACKETS_MODE                            10
+#define     SET_ENDPOINT_TOGGLE                                 9
+#define     SET_ENDPOINT_HALT                                   8
+#define     CLEAR_NAK_OUT_PACKETS                               7
+#define     CLEAR_EP_HIDE_STATUS_PHASE                          6
+#define     CLEAR_EP_FORCE_CRC_ERROR                            5
+#define     CLEAR_INTERRUPT_MODE                                4
+#define     CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE                3
+#define     CLEAR_NAK_OUT_PACKETS_MODE                          2
+#define     CLEAR_ENDPOINT_TOGGLE                               1
+#define     CLEAR_ENDPOINT_HALT                                 0
+	u32		ep_irqenb;
+#define     SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE              6
+#define     SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE           5
+#define     DATA_PACKET_RECEIVED_INTERRUPT_ENABLE               3
+#define     DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE            2
+#define     DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE                1
+#define     DATA_IN_TOKEN_INTERRUPT_ENABLE                      0
+	u32		ep_stat;
+#define     FIFO_VALID_COUNT                                    24
+#define     HIGH_BANDWIDTH_OUT_TRANSACTION_PID                  22
+#define     TIMEOUT                                             21
+#define     USB_STALL_SENT                                      20
+#define     USB_IN_NAK_SENT                                     19
+#define     USB_IN_ACK_RCVD                                     18
+#define     USB_OUT_PING_NAK_SENT                               17
+#define     USB_OUT_ACK_SENT                                    16
+#define     FIFO_OVERFLOW                                       13
+#define     FIFO_UNDERFLOW                                      12
+#define     FIFO_FULL                                           11
+#define     FIFO_EMPTY                                          10
+#define     FIFO_FLUSH                                          9
+#define     SHORT_PACKET_OUT_DONE_INTERRUPT                     6
+#define     SHORT_PACKET_TRANSFERRED_INTERRUPT                  5
+#define     NAK_OUT_PACKETS                                     4
+#define     DATA_PACKET_RECEIVED_INTERRUPT                      3
+#define     DATA_PACKET_TRANSMITTED_INTERRUPT                   2
+#define     DATA_OUT_PING_TOKEN_INTERRUPT                       1
+#define     DATA_IN_TOKEN_INTERRUPT                             0
+	// offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0
+	u32		ep_avail;
+	u32		ep_data;
+	u32		_unused0 [2];
+} __attribute__ ((packed));
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	__KERNEL__
+
+/* indexed registers [11.10] are accessed indirectly;
+ * the caller must own the device lock.
+ */
+
+static inline u32
+get_idx_reg (struct net2280_regs __iomem *regs, u32 index)
+{
+	writel (index, &regs->idxaddr);
+	/* NOTE:  synchs device/cpu memory views */
+	return readl (&regs->idxdata);
+}
+
+static inline void
+set_idx_reg (struct net2280_regs __iomem *regs, u32 index, u32 value)
+{
+	writel (index, &regs->idxaddr);
+	writel (value, &regs->idxdata);
+	/* posted, may not be visible yet */
+}
+
+#endif	/* __KERNEL__ */
+
+
+#define REG_DIAG		0x0
+#define     RETRY_COUNTER                                       16
+#define     FORCE_PCI_SERR                                      11
+#define     FORCE_PCI_INTERRUPT                                 10
+#define     FORCE_USB_INTERRUPT                                 9
+#define     FORCE_CPU_INTERRUPT                                 8
+#define     ILLEGAL_BYTE_ENABLES                                5
+#define     FAST_TIMES                                          4
+#define     FORCE_RECEIVE_ERROR                                 2
+#define     FORCE_TRANSMIT_CRC_ERROR                            0
+#define REG_FRAME		0x02	/* from last sof */
+#define REG_CHIPREV		0x03	/* in bcd */
+#define	REG_HS_NAK_RATE		0x0a	/* NAK per N uframes */
+
+#define	CHIPREV_1	0x0100
+#define	CHIPREV_1A	0x0110
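+
+/* illustrative only: indexed registers are read through the helpers
+ * above, with dev->lock held outside of probe/remove paths.  this
+ * mirrors how net2280_probe() fetches the chip revision; writes work
+ * the same way via set_idx_reg().
+ */
+#if 0
+static inline u16 example_read_chiprev (struct net2280_regs __iomem *regs)
+{
+	/* e.g. CHIPREV_1 == 0x0100, CHIPREV_1A == 0x0110 */
+	return get_idx_reg (regs, REG_CHIPREV) & 0xffff;
+}
+#endif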
+
+#ifdef	__KERNEL__
+
+/* ep a-f highspeed and fullspeed maxpacket, addresses
+ * computed from ep->num
+ */
+#define REG_EP_MAXPKT(dev,num) (((num) + 1) * 0x10 + \
+		(((dev)->gadget.speed == USB_SPEED_HIGH) ? 0 : 1))
+
+/*-------------------------------------------------------------------------*/
+
+/* [8.3] for scatter/gather i/o
+ * use struct net2280_dma_regs bitfields
+ */
+struct net2280_dma {
+	__le32		dmacount;
+	__le32		dmaaddr;		/* the buffer */
+	__le32		dmadesc;		/* next dma descriptor */
+	__le32		_reserved;
+} __attribute__ ((aligned (16)));
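+
+/* illustrative only: a minimal sketch of filling one descriptor for
+ * scatter/gather dma.  net2280.c builds these in its queueing code;
+ * the byte count lives in the low bits of dmacount, VALID_BIT marks
+ * the descriptor ready for the chip, and dmadesc links to the next
+ * descriptor in the chain.  example_fill_td() is hypothetical.
+ */
+#if 0
+static inline void example_fill_td (struct net2280_dma *td,
+		dma_addr_t buf, u32 len, dma_addr_t next)
+{
+	td->dmaaddr = cpu_to_le32 ((u32) buf);
+	td->dmadesc = cpu_to_le32 ((u32) next);
+	td->dmacount = cpu_to_le32 ((len & DMA_BYTE_COUNT_MASK)
+			| (1 << VALID_BIT)
+			| (1 << DMA_DONE_INTERRUPT_ENABLE));
+	wmb ();		/* descriptor must be visible before the chip polls it */
+}
+#endif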
+
+/*-------------------------------------------------------------------------*/
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+
+struct net2280_ep {
+	struct usb_ep				ep;
+	struct net2280_ep_regs			__iomem *regs;
+	struct net2280_dma_regs			__iomem *dma;
+	struct net2280_dma			*dummy;
+	dma_addr_t				td_dma;	/* of dummy */
+	struct net2280				*dev;
+	unsigned long				irqs;
+
+	/* analogous to a host-side qh */
+	struct list_head			queue;
+	const struct usb_endpoint_descriptor	*desc;
+	unsigned				num : 8,
+						fifo_size : 12,
+						in_fifo_validate : 1,
+						out_overflow : 1,
+						stopped : 1,
+						is_in : 1,
+						is_iso : 1;
+};
+
+static inline void allow_status (struct net2280_ep *ep)
+{
+	/* ep0 only */
+	writel (  (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
+		| (1 << CLEAR_NAK_OUT_PACKETS)
+		| (1 << CLEAR_NAK_OUT_PACKETS_MODE)
+		, &ep->regs->ep_rsp);
+	ep->stopped = 1;
+}
+
+/* count (<= 4) bytes in the next fifo write will be valid */
+static inline void set_fifo_bytecount (struct net2280_ep *ep, unsigned count)
+{
+	writeb (count, 2 + (u8 __iomem *) &ep->regs->ep_cfg);
+}
+
+struct net2280_request {
+	struct usb_request		req;
+	struct net2280_dma		*td;
+	dma_addr_t			td_dma;
+	struct list_head		queue;
+	unsigned			mapped : 1,
+					valid : 1;
+};
+
+struct net2280 {
+	/* each pci device provides one gadget, several endpoints */
+	struct usb_gadget		gadget;
+	spinlock_t			lock;
+	struct net2280_ep		ep [7];
+	struct usb_gadget_driver 	*driver;
+	unsigned			enabled : 1,
+					protocol_stall : 1,
+					softconnect : 1,
+					got_irq : 1,
+					region : 1;
+	u16				chiprev;
+
+	/* pci state used to access those endpoints */
+	struct pci_dev			*pdev;
+	struct net2280_regs		__iomem *regs;
+	struct net2280_usb_regs		__iomem *usb;
+	struct net2280_pci_regs		__iomem *pci;
+	struct net2280_dma_regs		__iomem *dma;
+	struct net2280_dep_regs		__iomem *dep;
+	struct net2280_ep_regs		__iomem *epregs;
+
+	struct pci_pool			*requests;
+	// statistics...
+};
+
+static inline void set_halt (struct net2280_ep *ep)
+{
+	/* ep0 and bulk/intr endpoints */
+	writel (  (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
+		    /* set NAK_OUT for erratum 0114 */
+		| ((ep->dev->chiprev == CHIPREV_1) << SET_NAK_OUT_PACKETS)
+		| (1 << SET_ENDPOINT_HALT)
+		, &ep->regs->ep_rsp);
+}
+
+static inline void clear_halt (struct net2280_ep *ep)
+{
+	/* ep0 and bulk/intr endpoints */
+	writel (  (1 << CLEAR_ENDPOINT_HALT)
+		| (1 << CLEAR_ENDPOINT_TOGGLE)
+		    /* unless the gadget driver left a short packet in the
+		     * fifo, this reverses the erratum 0114 workaround.
+		     */
+		| ((ep->dev->chiprev == CHIPREV_1) << CLEAR_NAK_OUT_PACKETS)
+		, &ep->regs->ep_rsp);
+}
+
+#ifdef USE_RDK_LEDS
+
+static inline void net2280_led_init (struct net2280 *dev)
+{
+	/* LED3 (green) is on during USB activity. note erratum 0113. */
+	writel ((1 << GPIO3_LED_SELECT)
+		| (1 << GPIO3_OUTPUT_ENABLE)
+		| (1 << GPIO2_OUTPUT_ENABLE)
+		| (1 << GPIO1_OUTPUT_ENABLE)
+		| (1 << GPIO0_OUTPUT_ENABLE)
+		, &dev->regs->gpioctl);
+}
+
+/* indicate speed with bi-color LED 0/1 */
+static inline
+void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed)
+{
+	u32	val = readl (&dev->regs->gpioctl);
+	switch (speed) {
+	case USB_SPEED_HIGH:		/* green */
+		val &= ~(1 << GPIO0_DATA);
+		val |= (1 << GPIO1_DATA);
+		break;
+	case USB_SPEED_FULL:		/* red */
+		val &= ~(1 << GPIO1_DATA);
+		val |= (1 << GPIO0_DATA);
+		break;
+	default:			/* (off/black) */
+		val &= ~((1 << GPIO1_DATA) | (1 << GPIO0_DATA));
+		break;
+	}
+	writel (val, &dev->regs->gpioctl);
+}
+
+/* indicate power with LED 2 */
+static inline void net2280_led_active (struct net2280 *dev, int is_active)
+{
+	u32	val = readl (&dev->regs->gpioctl);
+
+	// GPIO2_DATA is a bit number like its neighbors; the missing shift
+	// was likely why this LED never seemed to turn on.
+	if (is_active)
+		val |= (1 << GPIO2_DATA);
+	else
+		val &= ~(1 << GPIO2_DATA);
+	writel (val, &dev->regs->gpioctl);
+}
+static inline void net2280_led_shutdown (struct net2280 *dev)
+{
+	/* turn off all four GPIO*_DATA bits */
+	writel (readl (&dev->regs->gpioctl) & ~0x0f,
+			&dev->regs->gpioctl);
+}
+
+#else
+
+#define net2280_led_init(dev)		do { } while (0)
+#define net2280_led_speed(dev, speed)	do { } while (0)
+#define net2280_led_active(dev, is_active)	do { } while (0)
+#define net2280_led_shutdown(dev)	do { } while (0)
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(dev,level,fmt,args...) \
+	printk(level "%s %s: " fmt , driver_name , \
+			pci_name(dev->pdev) , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DEBUG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDEBUG DEBUG
+#else
+#define VDEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif	/* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARN(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
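+
+/* Hypothetical example:  DEBUG (dev, "ep%d irq, stat %08x\n", num, stat)
+ * expands (when DEBUG was defined at build time) to a printk tagged with
+ * the driver name and PCI device name; ERROR/WARN/INFO work the same way
+ * at their respective log levels.
+ */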
+
+/*-------------------------------------------------------------------------*/
+
+static inline void start_out_naking (struct net2280_ep *ep)
+{
+	/* NOTE:  hardware races lurk here, and PING protocol issues */
+	writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+	/* synch with device */
+	readl (&ep->regs->ep_rsp);
+}
+
+#ifdef DEBUG
+static inline void assert_out_naking (struct net2280_ep *ep, const char *where)
+{
+	u32	tmp = readl (&ep->regs->ep_stat);
+
+	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+		DEBUG (ep->dev, "%s %s %08x !NAK\n",
+				ep->ep.name, where, tmp);
+		writel ((1 << SET_NAK_OUT_PACKETS),
+			&ep->regs->ep_rsp);
+	}
+}
+#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep,__FUNCTION__)
+#else
+#define ASSERT_OUT_NAKING(ep) do {} while (0)
+#endif
+
+static inline void stop_out_naking (struct net2280_ep *ep)
+{
+	u32	tmp;
+
+	tmp = readl (&ep->regs->ep_stat);
+	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
+		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+}
+
+#endif	/* __KERNEL__ */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
new file mode 100644
index 0000000..b66ea5a
--- /dev/null
+++ b/drivers/usb/gadget/omap_udc.c
@@ -0,0 +1,2872 @@
+/*
+ * omap_udc.c -- for OMAP full speed udc; most chips support OTG.
+ *
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#undef	DEBUG
+#undef	VERBOSE
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+#include <linux/usb_otg.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <asm/mach-types.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+
+#include "omap_udc.h"
+
+#undef	USB_TRACE
+
+/* bulk DMA seems to be behaving for both IN and OUT */
+#define	USE_DMA
+
+/* ISO too */
+#define	USE_ISO
+
+#define	DRIVER_DESC	"OMAP UDC driver"
+#define	DRIVER_VERSION	"4 October 2004"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+
+/*
+ * The OMAP UDC needs _very_ early endpoint setup:  before enabling the
+ * D+ pullup to allow enumeration.  That's too early for the gadget
+ * framework to use from usb_ep_enable(), which happens after
+ * enumeration as part of activating an interface.  (But if we add an
+ * optional new "UDC not yet running" state to the gadget driver model,
+ * even just during driver binding, the endpoint autoconfig logic is the
+ * natural spot to manufacture new endpoints.)
+ *
+ * So instead of using endpoint enable calls to control the hardware setup,
+ * this driver defines a "fifo mode" parameter.  It's used during driver
+ * initialization to choose among a set of pre-defined endpoint configs.
+ * See omap_udc_setup() for available modes, or to add others.  That code
+ * lives in an init section, so use this driver as a module if you need
+ * to change the fifo mode after the kernel boots.
+ *
+ * Gadget drivers normally ignore endpoints they don't care about, and
+ * won't include them in configuration descriptors.  That means only
+ * misbehaving hosts would even notice they exist.
+ */
+#ifdef	USE_ISO
+static unsigned fifo_mode = 3;
+#else
+static unsigned fifo_mode = 0;
+#endif
+
+/* "modprobe omap_udc fifo_mode=42", or else as a kernel
+ * boot parameter "omap_udc:fifo_mode=42"
+ */
+module_param (fifo_mode, uint, 0);
+MODULE_PARM_DESC (fifo_mode, "endpoint setup (0 == default)");
+
+#ifdef	USE_DMA
+static unsigned use_dma = 1;
+
+/* "modprobe omap_udc use_dma=y", or else as a kernel
+ * boot parameter "omap_udc:use_dma=y"
+ */
+module_param (use_dma, bool, 0);
+MODULE_PARM_DESC (use_dma, "enable/disable DMA");
+#else	/* !USE_DMA */
+
+/* save a bit of code */
+#define	use_dma		0
+#endif	/* !USE_DMA */
+
+
+static const char driver_name [] = "omap_udc";
+static const char driver_desc [] = DRIVER_DESC;
+
+/*-------------------------------------------------------------------------*/
+
+/* there's a notion of "current endpoint" for modifying endpoint
+ * state, and PIO access to its FIFO.  
+ */
+
+static void use_ep(struct omap_ep *ep, u16 select)
+{
+	u16	num = ep->bEndpointAddress & 0x0f;
+
+	if (ep->bEndpointAddress & USB_DIR_IN)
+		num |= UDC_EP_DIR;
+	UDC_EP_NUM_REG = num | select;
+	/* when selecting, MUST deselect later !! */
+}
+
+static inline void deselect_ep(void)
+{
+	UDC_EP_NUM_REG &= ~UDC_EP_SEL;
+	/* 6 wait states before TX will happen */
+}
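+
+/* Sketch of the typical PIO sequence (callers hold the udc lock):
+ *
+ *	use_ep(ep, UDC_EP_SEL);
+ *	... touch UDC_CTRL_REG / UDC_DATA_REG for that endpoint ...
+ *	deselect_ep();
+ */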
+
+static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	u16		maxp;
+
+	/* catch various bogus parameters */
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT
+			|| ep->bEndpointAddress != desc->bEndpointAddress
+			|| ep->maxpacket < le16_to_cpu
+						(desc->wMaxPacketSize)) {
+		DBG("%s, bad ep or descriptor\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	maxp = le16_to_cpu (desc->wMaxPacketSize);
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+				&& maxp != ep->maxpacket)
+			|| maxp > ep->maxpacket
+			|| !maxp) {
+		DBG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
+		return -ERANGE;
+	}
+
+#ifdef	USE_ISO
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
+				&& desc->bInterval != 1)) {
+		/* hardware wants period = 1; USB allows 2^(Interval-1) */
+		DBG("%s, unsupported ISO period %dms\n", _ep->name,
+				1 << (desc->bInterval - 1));
+		return -EDOM;
+	}
+#else
+	if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		DBG("%s, ISO nyet\n", _ep->name);
+		return -EDOM;
+	}
+#endif
+
+	/* xfer types must match, except that interrupt ~= bulk */
+	if (ep->bmAttributes != desc->bmAttributes
+			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+		DBG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
+		return -EINVAL;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+		DBG("%s, bogus device state\n", __FUNCTION__);
+		return -ESHUTDOWN;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	ep->desc = desc;
+	ep->irqs = 0;
+	ep->stopped = 0;
+	ep->ep.maxpacket = maxp;
+
+	/* set endpoint to initial state */
+	ep->dma_channel = 0;
+	ep->has_dma = 0;
+	ep->lch = -1;
+	use_ep(ep, UDC_EP_SEL);
+	UDC_CTRL_REG = UDC_RESET_EP;
+	ep->ackwait = 0;
+	deselect_ep();
+
+	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+		list_add(&ep->iso, &udc->iso);
+
+	/* maybe assign a DMA channel to this endpoint */
+	if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
+		/* FIXME ISO can dma, but prefers first channel */
+		dma_channel_claim(ep, 0);
+
+	/* PIO OUT may RX packets */
+	if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
+			&& !ep->has_dma
+			&& !(ep->bEndpointAddress & USB_DIR_IN)) {
+		UDC_CTRL_REG = UDC_SET_FIFO_EN;
+		ep->ackwait = 1 + ep->double_buf;
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	VDBG("%s enabled\n", _ep->name);
+	return 0;
+}
+
+static void nuke(struct omap_ep *, int status);
+
+static int omap_ep_disable(struct usb_ep *_ep)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	unsigned long	flags;
+
+	if (!_ep || !ep->desc) {
+		DBG("%s, %s not enabled\n", __FUNCTION__,
+			_ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	ep->desc = 0;
+	nuke (ep, -ESHUTDOWN);
+	ep->ep.maxpacket = ep->maxpacket;
+	ep->has_dma = 0;
+	UDC_CTRL_REG = UDC_SET_HALT;
+	list_del_init(&ep->iso);
+	del_timer(&ep->timer);
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	VDBG("%s disabled\n", _ep->name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+omap_alloc_request(struct usb_ep *ep, int gfp_flags)
+{
+	struct omap_req	*req;
+
+	req = kmalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+	memset (req, 0, sizeof *req);
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD (&req->queue);
+	return &req->req;
+}
+
+static void
+omap_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct omap_req	*req = container_of(_req, struct omap_req, req);
+
+	if (_req)
+		kfree (req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void *
+omap_alloc_buffer(
+	struct usb_ep	*_ep,
+	unsigned	bytes,
+	dma_addr_t	*dma,
+	int		gfp_flags
+)
+{
+	void		*retval;
+	struct omap_ep	*ep;
+
+	ep = container_of(_ep, struct omap_ep, ep);
+	if (use_dma && ep->has_dma) {
+		static int	warned;
+		if (!warned && bytes < PAGE_SIZE) {
+			dev_warn(ep->udc->gadget.dev.parent,
+				"using dma_alloc_coherent for "
+				"small allocations wastes memory\n");
+			warned++;
+		}
+		return dma_alloc_coherent(ep->udc->gadget.dev.parent,
+				bytes, dma, gfp_flags);
+	}
+
+	retval = kmalloc(bytes, gfp_flags);
+	if (retval)
+		*dma = virt_to_phys(retval);
+	return retval;
+}
+
+static void omap_free_buffer(
+	struct usb_ep	*_ep,
+	void		*buf,
+	dma_addr_t	dma,
+	unsigned	bytes
+)
+{
+	struct omap_ep	*ep;
+
+	ep = container_of(_ep, struct omap_ep, ep);
+	if (use_dma && _ep && ep->has_dma)
+		dma_free_coherent(ep->udc->gadget.dev.parent, bytes, buf, dma);
+	else
+		kfree (buf);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+done(struct omap_ep *ep, struct omap_req *req, int status)
+{
+	unsigned		stopped = ep->stopped;
+
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (use_dma && ep->has_dma) {
+		if (req->mapped) {
+			dma_unmap_single(ep->udc->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->req.dma = DMA_ADDR_INVALID;
+			req->mapped = 0;
+		} else
+			dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+	}
+
+#ifndef	USB_TRACE
+	if (status && status != -ESHUTDOWN)
+#endif
+		VDBG("complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&ep->udc->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&ep->udc->lock);
+	ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define	FIFO_FULL	(UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
+#define	FIFO_UNWRITABLE	(UDC_EP_HALTED | FIFO_FULL)
+
+#define FIFO_EMPTY	(UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
+#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
+
+static inline int 
+write_packet(u8 *buf, struct omap_req *req, unsigned max)
+{
+	unsigned	len;
+	u16		*wp;
+
+	len = min(req->req.length - req->req.actual, max);
+	req->req.actual += len;
+
+	max = len;
+	if (likely((((int)buf) & 1) == 0)) {
+		wp = (u16 *)buf;
+		while (max >= 2) {
+			UDC_DATA_REG = *wp++;
+			max -= 2;
+		}
+		buf = (u8 *)wp;
+	}
+	while (max--)
+		*(volatile u8 *)&UDC_DATA_REG = *buf++;
+	return len;
+}
+
+// FIXME change r/w fifo calling convention
+
+
+// return:  0 = still running, 1 = completed, negative = errno
+static int write_fifo(struct omap_ep *ep, struct omap_req *req)
+{
+	u8		*buf;
+	unsigned	count;
+	int		is_last;
+	u16		ep_stat;
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	/* PIO-IN isn't double buffered except for iso */
+	ep_stat = UDC_STAT_FLG_REG;
+	if (ep_stat & FIFO_UNWRITABLE)
+		return 0;
+
+	count = ep->ep.maxpacket;
+	count = write_packet(buf, req, count);
+	UDC_CTRL_REG = UDC_SET_FIFO_EN;
+	ep->ackwait = 1;
+
+	/* last packet is often short (sometimes a zlp) */
+	if (count != ep->ep.maxpacket)
+		is_last = 1;
+	else if (req->req.length == req->req.actual
+			&& !req->req.zero)
+		is_last = 1;
+	else
+		is_last = 0;
+
+	/* NOTE:  requests complete when all IN data is in a
+	 * FIFO (or sometimes later, if a zlp was needed).
+	 * Use usb_ep_fifo_status() where needed.
+	 */
+	if (is_last)
+		done(ep, req, 0);
+	return is_last;
+}
+
+static inline int 
+read_packet(u8 *buf, struct omap_req *req, unsigned avail)
+{
+	unsigned	len;
+	u16		*wp;
+
+	len = min(req->req.length - req->req.actual, avail);
+	req->req.actual += len;
+	avail = len;
+
+	if (likely((((int)buf) & 1) == 0)) {
+		wp = (u16 *)buf;
+		while (avail >= 2) {
+			*wp++ = UDC_DATA_REG;
+			avail -= 2;
+		}
+		buf = (u8 *)wp;
+	}
+	while (avail--)
+		*buf++ = *(volatile u8 *)&UDC_DATA_REG;
+	return len;
+}
+
+// return:  0 = still running, 1 = queue empty, negative = errno
+static int read_fifo(struct omap_ep *ep, struct omap_req *req)
+{
+	u8		*buf;
+	unsigned	count, avail;
+	int		is_last;
+
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+
+	for (;;) {
+		u16	ep_stat = UDC_STAT_FLG_REG;
+
+		is_last = 0;
+		if (ep_stat & FIFO_EMPTY) {
+			if (!ep->double_buf)
+				break;
+			ep->fnf = 1;
+		}
+		if (ep_stat & UDC_EP_HALTED)
+			break;
+
+		if (ep_stat & FIFO_FULL)
+			avail = ep->ep.maxpacket;
+		else  {
+			avail = UDC_RXFSTAT_REG;
+			ep->fnf = ep->double_buf;
+		}
+		count = read_packet(buf, req, avail);
+
+		/* partial packet reads may not be errors */
+		if (count < ep->ep.maxpacket) {
+			is_last = 1;
+			/* overflowed this request?  flush extra data */
+			if (count != avail) {
+				req->req.status = -EOVERFLOW;
+				avail -= count;
+				while (avail--)
+					(void) *(volatile u8 *)&UDC_DATA_REG;
+			}
+		} else if (req->req.length == req->req.actual)
+			is_last = 1;
+		else
+			is_last = 0;
+
+		if (!ep->bEndpointAddress)
+			break;
+		if (is_last)
+			done(ep, req, 0);
+		break;
+	}
+	return is_last;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
+{
+	dma_addr_t	end;
+
+	/* IN-DMA needs this on fault/cancel paths, but 15xx misreports
+	 * the last transfer's bytecount by more than a FIFO's worth,
+	 * so just report zero there.
+	 */
+	if (cpu_is_omap15xx())
+		return 0;
+
+	end = omap_readw(OMAP_DMA_CSAC(ep->lch));
+	if (end == ep->dma_counter)
+		return 0;
+
+	end |= start & (0xffff << 16);
+	if (end < start)
+		end += 0x10000;
+	return end - start;
+}
+
+#define DMA_DEST_LAST(x) (cpu_is_omap15xx() \
+		? OMAP_DMA_CSAC(x) /* really: CPC */ \
+		: OMAP_DMA_CDAC(x))
+
+static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
+{
+	dma_addr_t	end;
+
+	end = omap_readw(DMA_DEST_LAST(ep->lch));
+	if (end == ep->dma_counter)
+		return 0;
+
+	end |= start & (0xffff << 16);
+	if (cpu_is_omap15xx())
+		end++;
+	if (end < start)
+		end += 0x10000;
+	return end - start;
+}
+
+
+/* Each USB transfer request using DMA maps to one or more DMA transfers.
+ * When DMA completion isn't request completion, the UDC continues with
+ * the next DMA transfer for that USB transfer.
+ */
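+
+/* Rough flow (descriptive sketch): omap_ep_queue() or a channel-claim
+ * restart starts next_{in,out}_dma(); the completion IRQ lands in dma_irq(),
+ * which calls finish_{in,out}_dma() to account for the bytes moved (possibly
+ * completing the request), then starts the next chunk if the queue is still
+ * non-empty.
+ */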
+
+static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
+{
+	u16		txdma_ctrl;
+	unsigned	length = req->req.length - req->req.actual;
+	const int	sync_mode = cpu_is_omap15xx()
+				? OMAP_DMA_SYNC_FRAME
+				: OMAP_DMA_SYNC_ELEMENT;
+
+	/* measure length in either bytes or packets */
+	if ((cpu_is_omap16xx() && length <= (UDC_TXN_TSC + 1))
+			|| (cpu_is_omap15xx() && length < ep->maxpacket)) {
+		txdma_ctrl = UDC_TXN_EOT | length;
+		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
+				length, 1, sync_mode);
+	} else {
+		length = min(length / ep->maxpacket,
+				(unsigned) UDC_TXN_TSC + 1);
+		txdma_ctrl = length;
+		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
+				ep->ep.maxpacket, length, sync_mode);
+		length *= ep->maxpacket;
+	}
+	omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
+		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
+
+	omap_start_dma(ep->lch);
+	ep->dma_counter = omap_readw(OMAP_DMA_CSAC(ep->lch));
+	UDC_DMA_IRQ_EN_REG |= UDC_TX_DONE_IE(ep->dma_channel);
+	UDC_TXDMA_REG(ep->dma_channel) = UDC_TXN_START | txdma_ctrl;
+	req->dma_bytes = length;
+}
+
+static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
+{
+	if (status == 0) {
+		req->req.actual += req->dma_bytes;
+
+		/* return if this request needs to send data or zlp */
+		if (req->req.actual < req->req.length)
+			return;
+		if (req->req.zero
+				&& req->dma_bytes != 0
+				&& (req->req.actual % ep->maxpacket) == 0)
+			return;
+	} else
+		req->req.actual += dma_src_len(ep, req->req.dma
+							+ req->req.actual);
+
+	/* tx completion */
+	omap_stop_dma(ep->lch);
+	UDC_DMA_IRQ_EN_REG &= ~UDC_TX_DONE_IE(ep->dma_channel);
+	done(ep, req, status);
+}
+
+static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
+{
+	unsigned packets;
+
+	/* NOTE:  we filtered out "short reads" before, so we know
+	 * the buffer has only whole numbers of packets.
+	 */
+
+	/* set up this DMA transfer, enable the fifo, start */
+	packets = (req->req.length - req->req.actual) / ep->ep.maxpacket;
+	packets = min(packets, (unsigned)UDC_RXN_TC + 1);
+	req->dma_bytes = packets * ep->ep.maxpacket;
+	omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
+			ep->ep.maxpacket, packets,
+			OMAP_DMA_SYNC_ELEMENT);
+	omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
+		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
+	ep->dma_counter = omap_readw(DMA_DEST_LAST(ep->lch));
+
+	UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1);
+	UDC_DMA_IRQ_EN_REG |= UDC_RX_EOT_IE(ep->dma_channel);
+	UDC_EP_NUM_REG = (ep->bEndpointAddress & 0xf);
+	UDC_CTRL_REG = UDC_SET_FIFO_EN;
+
+	omap_start_dma(ep->lch);
+}
+
+static void
+finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status)
+{
+	u16	count;
+
+	if (status == 0)
+		ep->dma_counter = (u16) (req->req.dma + req->req.actual);
+	count = dma_dest_len(ep, req->req.dma + req->req.actual);
+	count += req->req.actual;
+	if (count <= req->req.length)
+		req->req.actual = count;
+
+	if (count != req->dma_bytes || status)
+		omap_stop_dma(ep->lch);
+
+	/* if this wasn't short, request may need another transfer */
+	else if (req->req.actual < req->req.length)
+		return;
+
+	/* rx completion */
+	UDC_DMA_IRQ_EN_REG &= ~UDC_RX_EOT_IE(ep->dma_channel);
+	done(ep, req, status);
+}
+
+static void dma_irq(struct omap_udc *udc, u16 irq_src)
+{
+	u16		dman_stat = UDC_DMAN_STAT_REG;
+	struct omap_ep	*ep;
+	struct omap_req	*req;
+
+	/* IN dma: tx to host */
+	if (irq_src & UDC_TXN_DONE) {
+		ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
+		ep->irqs++;
+		/* can see TXN_DONE after dma abort */
+		if (!list_empty(&ep->queue)) {
+			req = container_of(ep->queue.next,
+						struct omap_req, queue);
+			finish_in_dma(ep, req, 0);
+		}
+		UDC_IRQ_SRC_REG = UDC_TXN_DONE;
+
+		if (!list_empty (&ep->queue)) {
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			next_in_dma(ep, req);
+		}
+	}
+
+	/* OUT dma: rx from host */
+	if (irq_src & UDC_RXN_EOT) {
+		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
+		ep->irqs++;
+		/* can see RXN_EOT after dma abort */
+		if (!list_empty(&ep->queue)) {
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			finish_out_dma(ep, req, 0);
+		}
+		UDC_IRQ_SRC_REG = UDC_RXN_EOT;
+
+		if (!list_empty (&ep->queue)) {
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			next_out_dma(ep, req);
+		}
+	}
+
+	if (irq_src & UDC_RXN_CNT) {
+		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
+		ep->irqs++;
+		/* omap15xx does this unasked... */
+		VDBG("%s, RX_CNT irq?\n", ep->ep.name);
+		UDC_IRQ_SRC_REG = UDC_RXN_CNT;
+	}
+}
+
+static void dma_error(int lch, u16 ch_status, void *data)
+{
+	struct omap_ep	*ep = data;
+
+	/* if ch_status & OMAP_DMA_DROP_IRQ ... */
+	/* if ch_status & OMAP_DMA_TOUT_IRQ ... */
+	ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);
+
+	/* complete current transfer ... */
+}
+
+static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
+{
+	u16	reg;
+	int	status, restart, is_in;
+
+	is_in = ep->bEndpointAddress & USB_DIR_IN;
+	if (is_in)
+		reg = UDC_TXDMA_CFG_REG;
+	else
+		reg = UDC_RXDMA_CFG_REG;
+	reg |= 1 << 12;		/* "pulse" activated */
+
+	ep->dma_channel = 0;
+	ep->lch = -1;
+	if (channel == 0 || channel > 3) {
+		if ((reg & 0x0f00) == 0)
+			channel = 3;
+		else if ((reg & 0x00f0) == 0)
+			channel = 2;
+		else if ((reg & 0x000f) == 0)	/* preferred for ISO */
+			channel = 1;
+		else {
+			status = -EMLINK;
+			goto just_restart;
+		}
+	}
+	reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
+	ep->dma_channel = channel;
+
+	if (is_in) {
+		status = omap_request_dma(OMAP_DMA_USB_W2FC_TX0 - 1 + channel,
+			ep->ep.name, dma_error, ep, &ep->lch);
+		if (status == 0) {
+			UDC_TXDMA_CFG_REG = reg;
+			omap_set_dma_dest_params(ep->lch,
+				OMAP_DMA_PORT_TIPB,
+				OMAP_DMA_AMODE_CONSTANT,
+				(unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
+		}
+	} else {
+		status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel,
+			ep->ep.name, dma_error, ep, &ep->lch);
+		if (status == 0) {
+			UDC_RXDMA_CFG_REG = reg;
+			omap_set_dma_src_params(ep->lch,
+				OMAP_DMA_PORT_TIPB,
+				OMAP_DMA_AMODE_CONSTANT,
+				(unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
+		}
+	}
+	if (status)
+		ep->dma_channel = 0;
+	else {
+		ep->has_dma = 1;
+		omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
+
+		/* channel type P: hw synch (fifo) */
+		if (!cpu_is_omap15xx())
+			omap_writew(2, OMAP_DMA_LCH_CTRL(ep->lch));
+	}
+
+just_restart:
+	/* restart any queue, even if the claim failed  */
+	restart = !ep->stopped && !list_empty(&ep->queue);
+
+	if (status)
+		DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
+			restart ? " (restart)" : "");
+	else
+		DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
+			is_in ? 't' : 'r',
+			ep->dma_channel - 1, ep->lch,
+			restart ? " (restart)" : "");
+
+	if (restart) {
+		struct omap_req	*req;
+		req = container_of(ep->queue.next, struct omap_req, queue);
+		if (ep->has_dma)
+			(is_in ? next_in_dma : next_out_dma)(ep, req);
+		else {
+			use_ep(ep, UDC_EP_SEL);
+			(is_in ? write_fifo : read_fifo)(ep, req);
+			deselect_ep();
+			if (!is_in) {
+				UDC_CTRL_REG = UDC_SET_FIFO_EN;
+				ep->ackwait = 1 + ep->double_buf;
+			}
+			/* IN: 6 wait states before it'll tx */
+		}
+	}
+}
+
+static void dma_channel_release(struct omap_ep *ep)
+{
+	int		shift = 4 * (ep->dma_channel - 1);
+	u16		mask = 0x0f << shift;
+	struct omap_req	*req;
+	int		active;
+
+	/* abort any active usb transfer request */
+	if (!list_empty(&ep->queue))
+		req = container_of(ep->queue.next, struct omap_req, queue);
+	else
+		req = 0;
+
+	active = ((1 << 7) & omap_readl(OMAP_DMA_CCR(ep->lch))) != 0;
+
+	DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
+			active ? "active" : "idle",
+			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
+			ep->dma_channel - 1, req);
+
+	/* wait till current packet DMA finishes, and fifo empties */
+	if (ep->bEndpointAddress & USB_DIR_IN) {
+		UDC_TXDMA_CFG_REG &= ~mask;
+
+		if (req) {
+			finish_in_dma(ep, req, -ECONNRESET);
+
+			/* clear FIFO; hosts probably won't empty it */
+			use_ep(ep, UDC_EP_SEL);
+			UDC_CTRL_REG = UDC_CLR_EP;
+			deselect_ep();
+		}
+		while (UDC_TXDMA_CFG_REG & mask)
+			udelay(10);
+	} else {
+		UDC_RXDMA_CFG_REG &= ~mask;
+
+		/* dma empties the fifo */
+		while (UDC_RXDMA_CFG_REG & mask)
+			udelay(10);
+		if (req)
+			finish_out_dma(ep, req, -ECONNRESET);
+	}
+	omap_free_dma(ep->lch);
+	ep->dma_channel = 0;
+	ep->lch = -1;
+	/* has_dma still set, till endpoint is fully quiesced */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int
+omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	struct omap_req	*req = container_of(_req, struct omap_req, req);
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	int		is_iso = 0;
+
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		DBG("%s, bad params\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
+		DBG("%s, bad ep\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+		is_iso = 1;
+	}
+
+	/* this isn't bogus, but OMAP DMA isn't the only hardware to
+	 * have a hard time with partial packet reads...  reject it.
+	 */
+	if (use_dma
+			&& ep->has_dma
+			&& ep->bEndpointAddress != 0
+			&& (ep->bEndpointAddress & USB_DIR_IN) == 0
+			&& (req->req.length % ep->ep.maxpacket) != 0) {
+		DBG("%s, no partial packet OUT reads\n", __FUNCTION__);
+		return -EMSGSIZE;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	if (use_dma && ep->has_dma) {
+		if (req->req.dma == DMA_ADDR_INVALID) {
+			req->req.dma = dma_map_single(
+				ep->udc->gadget.dev.parent,
+				req->req.buf,
+				req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->mapped = 1;
+		} else {
+			dma_sync_single_for_device(
+				ep->udc->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->mapped = 0;
+		}
+	}
+
+	VDBG("%s queue req %p, len %d buf %p\n",
+		ep->ep.name, _req, _req->length, _req->buf);
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+
+	/* maybe kickstart non-iso i/o queues */
+	if (is_iso)
+		UDC_IRQ_EN_REG |= UDC_SOF_IE;
+	else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
+		int	is_in;
+
+		if (ep->bEndpointAddress == 0) {
+			if (!udc->ep0_pending || !list_empty (&ep->queue)) {
+				spin_unlock_irqrestore(&udc->lock, flags);
+				return -EL2HLT;
+			}
+
+			/* empty DATA stage? */
+			is_in = udc->ep0_in;
+			if (!req->req.length) {
+
+				/* chip became CONFIGURED or ADDRESSED
+				 * earlier; drivers may already have queued
+				 * requests to non-control endpoints
+				 */
+				if (udc->ep0_set_config) {
+					u16	irq_en = UDC_IRQ_EN_REG;
+
+					irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
+					if (!udc->ep0_reset_config)
+						irq_en |= UDC_EPN_RX_IE
+							| UDC_EPN_TX_IE;
+					UDC_IRQ_EN_REG = irq_en;
+				}
+
+				/* STATUS is reverse direction */
+				UDC_EP_NUM_REG = is_in
+						? UDC_EP_SEL
+						: (UDC_EP_SEL|UDC_EP_DIR);
+				UDC_CTRL_REG = UDC_CLR_EP;
+				UDC_CTRL_REG = UDC_SET_FIFO_EN;
+				UDC_EP_NUM_REG = udc->ep0_in ? 0 : UDC_EP_DIR;
+
+				/* cleanup */
+				udc->ep0_pending = 0;
+				done(ep, req, 0);
+				req = 0;
+
+			/* non-empty DATA stage */
+			} else if (is_in) {
+				UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
+			} else {
+				if (udc->ep0_setup)
+					goto irq_wait;
+				UDC_EP_NUM_REG = UDC_EP_SEL;
+			}
+		} else {
+			is_in = ep->bEndpointAddress & USB_DIR_IN;
+			if (!ep->has_dma)
+				use_ep(ep, UDC_EP_SEL);
+			/* if ISO: SOF IRQs must be enabled/disabled! */
+		}
+
+		if (ep->has_dma)
+			(is_in ? next_in_dma : next_out_dma)(ep, req);
+		else if (req) {
+			if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
+				req = 0;
+			deselect_ep();
+			if (!is_in) {
+				UDC_CTRL_REG = UDC_SET_FIFO_EN;
+				ep->ackwait = 1 + ep->double_buf;
+			}
+			/* IN: 6 wait states before it'll tx */
+		}
+	}
+
+irq_wait:
+	/* irq handler advances the queue */
+	if (req != 0)
+		list_add_tail(&req->queue, &ep->queue);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	struct omap_req	*req;
+	unsigned long	flags;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		return -EINVAL;
+	}
+
+	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
+		int channel = ep->dma_channel;
+
+		/* releasing the channel cancels the request,
+		 * reclaiming the channel restarts the queue
+		 */
+		dma_channel_release(ep);
+		dma_channel_claim(ep, channel);
+	} else 
+		done(ep, req, -ECONNRESET);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	unsigned long	flags;
+	int		status = -EOPNOTSUPP;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* just use protocol stalls for ep0; real halts are annoying */
+	if (ep->bEndpointAddress == 0) {
+		if (!ep->udc->ep0_pending)
+			status = -EINVAL;
+		else if (value) {
+			if (ep->udc->ep0_set_config) {
+				WARN("error changing config?\n");
+				UDC_SYSCON2_REG = UDC_CLR_CFG;
+			}
+			UDC_SYSCON2_REG = UDC_STALL_CMD;
+			ep->udc->ep0_pending = 0;
+			status = 0;
+		} else /* NOP */
+			status = 0;
+
+	/* otherwise, all active non-ISO endpoints can halt */
+	} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {
+
+		/* IN endpoints must already be idle */
+		if ((ep->bEndpointAddress & USB_DIR_IN)
+				&& !list_empty(&ep->queue)) { 
+			status = -EAGAIN;
+			goto done;
+		}
+
+		if (value) {
+			int	channel;
+
+			if (use_dma && ep->dma_channel
+					&& !list_empty(&ep->queue)) {
+				channel = ep->dma_channel;
+				dma_channel_release(ep);
+			} else
+				channel = 0;
+
+			use_ep(ep, UDC_EP_SEL);
+			if (UDC_STAT_FLG_REG & UDC_NON_ISO_FIFO_EMPTY) {
+				UDC_CTRL_REG = UDC_SET_HALT;
+				status = 0;
+			} else
+				status = -EAGAIN;
+			deselect_ep();
+
+			if (channel)
+				dma_channel_claim(ep, channel);
+		} else {
+			use_ep(ep, 0);
+			UDC_CTRL_REG = UDC_RESET_EP;
+			ep->ackwait = 0;
+			if (!(ep->bEndpointAddress & USB_DIR_IN)) {
+				UDC_CTRL_REG = UDC_SET_FIFO_EN;
+				ep->ackwait = 1 + ep->double_buf;
+			}
+		}
+	}
+done:
+	VDBG("%s %s halt stat %d\n", ep->ep.name,
+		value ? "set" : "clear", status);
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return status;
+}
+
+static struct usb_ep_ops omap_ep_ops = {
+	.enable		= omap_ep_enable,
+	.disable	= omap_ep_disable,
+
+	.alloc_request	= omap_alloc_request,
+	.free_request	= omap_free_request,
+
+	.alloc_buffer	= omap_alloc_buffer,
+	.free_buffer	= omap_free_buffer,
+
+	.queue		= omap_ep_queue,
+	.dequeue	= omap_ep_dequeue,
+
+	.set_halt	= omap_ep_set_halt,
+	// fifo_status ... report bytes in fifo
+	// fifo_flush ... flush fifo
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_get_frame(struct usb_gadget *gadget)
+{
+	u16	sof = UDC_SOF_REG;
+	return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
+}
+
+static int omap_wakeup(struct usb_gadget *gadget)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	int		retval = -EHOSTUNREACH;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->devstat & UDC_SUS) {
+		/* NOTE:  OTG spec erratum says that OTG devices may
+		 * issue wakeups without host enable.
+		 */
+		if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
+			DBG("remote wakeup...\n");
+			UDC_SYSCON2_REG = UDC_RMT_WKP;
+			retval = 0;
+		}
+
+	/* NOTE:  non-OTG systems may use SRP TOO... */
+	} else if (!(udc->devstat & UDC_ATT)) {
+		if (udc->transceiver)
+			retval = otg_start_srp(udc->transceiver);
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return retval;
+}
+
+static int
+omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	u16		syscon1;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	syscon1 = UDC_SYSCON1_REG;
+	if (is_selfpowered)
+		syscon1 |= UDC_SELF_PWR;
+	else
+		syscon1 &= ~UDC_SELF_PWR;
+	UDC_SYSCON1_REG = syscon1;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static int can_pullup(struct omap_udc *udc)
+{
+	return udc->driver && udc->softconnect && udc->vbus_active;
+}
+
+static void pullup_enable(struct omap_udc *udc)
+{
+	UDC_SYSCON1_REG |= UDC_PULLUP_EN;
+#ifndef CONFIG_USB_OTG
+	if (!cpu_is_omap15xx())
+		OTG_CTRL_REG |= OTG_BSESSVLD;
+#endif
+	UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
+}
+
+static void pullup_disable(struct omap_udc *udc)
+{
+#ifndef CONFIG_USB_OTG
+	if (!cpu_is_omap15xx())
+		OTG_CTRL_REG &= ~OTG_BSESSVLD;
+#endif
+	UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
+	UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
+}
+
+/*
+ * Called by whatever detects VBUS sessions:  external transceiver
+ * driver, or maybe GPIO0 VBUS IRQ.  May request 48 MHz clock.
+ */
+static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	VDBG("VBUS %s\n", is_active ? "on" : "off");
+	udc->vbus_active = (is_active != 0);
+	if (cpu_is_omap15xx()) {
+		/* "software" detect, ignored if !VBUS_MODE_1510 */
+		if (is_active)
+			FUNC_MUX_CTRL_0_REG |= VBUS_CTRL_1510;
+		else
+			FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
+	}
+	if (can_pullup(udc))
+		pullup_enable(udc);
+	else
+		pullup_disable(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct omap_udc	*udc;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	if (udc->transceiver)
+		return otg_set_power(udc->transceiver, mA);
+	return -EOPNOTSUPP;
+}
+
+static int omap_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->softconnect = (is_on != 0);
+	if (can_pullup(udc))
+		pullup_enable(udc);
+	else
+		pullup_disable(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static struct usb_gadget_ops omap_gadget_ops = {
+	.get_frame		= omap_get_frame,
+	.wakeup			= omap_wakeup,
+	.set_selfpowered	= omap_set_selfpowered,
+	.vbus_session		= omap_vbus_session,
+	.vbus_draw		= omap_vbus_draw,
+	.pullup			= omap_pullup,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* dequeue ALL requests; caller holds udc->lock */
+static void nuke(struct omap_ep *ep, int status)
+{
+	struct omap_req	*req;
+
+	ep->stopped = 1;
+
+	if (use_dma && ep->dma_channel)
+		dma_channel_release(ep);
+
+	use_ep(ep, 0);
+	UDC_CTRL_REG = UDC_CLR_EP;
+	if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
+		UDC_CTRL_REG = UDC_SET_HALT;
+
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct omap_req, queue);
+		done(ep, req, status);
+	}
+}
+
+/* caller holds udc->lock */
+static void udc_quiesce(struct omap_udc *udc)
+{
+	struct omap_ep	*ep;
+
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	nuke(&udc->ep[0], -ESHUTDOWN);
+	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
+		nuke(ep, -ESHUTDOWN);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void update_otg(struct omap_udc *udc)
+{
+	u16	devstat;
+
+	if (!udc->gadget.is_otg)
+		return;
+
+	if (OTG_CTRL_REG & OTG_ID)
+		devstat = UDC_DEVSTAT_REG;
+	else
+		devstat = 0;
+
+	udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
+	udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
+	udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);
+
+	/* Enable HNP early, avoiding races on suspend irq path.
+	 * ASSUMES OTG state machine B_BUS_REQ input is true.
+	 */
+	if (udc->gadget.b_hnp_enable)
+		OTG_CTRL_REG = (OTG_CTRL_REG | OTG_B_HNPEN | OTG_B_BUSREQ)
+				& ~OTG_PULLUP;
+}
+
+static void ep0_irq(struct omap_udc *udc, u16 irq_src)
+{
+	struct omap_ep	*ep0 = &udc->ep[0];
+	struct omap_req	*req = 0;
+
+	ep0->irqs++;
+
+	/* Clear any pending requests and then scrub any rx/tx state
+	 * before starting to handle the SETUP request.
+	 */
+	if (irq_src & UDC_SETUP) {
+		u16	ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);
+
+		nuke(ep0, 0);
+		if (ack) {
+			UDC_IRQ_SRC_REG = ack;
+			irq_src = UDC_SETUP;
+		}
+	}
+
+	/* IN/OUT packets mean we're in the DATA or STATUS stage.
+	 * This driver uses only protocol stalls (ep0 never halts),
+	 * and if we got this far the gadget driver already had a
+	 * chance to stall.  Tries to be forgiving of host oddities.
+	 *
+	 * NOTE:  the last chance gadget drivers have to stall control
+	 * requests is during their request completion callback.
+	 */
+	if (!list_empty(&ep0->queue))
+		req = container_of(ep0->queue.next, struct omap_req, queue);
+
+	/* IN == TX to host */
+	if (irq_src & UDC_EP0_TX) {
+		int	stat;
+
+		UDC_IRQ_SRC_REG = UDC_EP0_TX;
+		UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
+		stat = UDC_STAT_FLG_REG;
+		if (stat & UDC_ACK) {
+			if (udc->ep0_in) {
+				/* write next IN packet from response,
+				 * or set up the status stage.
+				 */
+				if (req)
+					stat = write_fifo(ep0, req);
+				UDC_EP_NUM_REG = UDC_EP_DIR;
+				if (!req && udc->ep0_pending) {
+					UDC_EP_NUM_REG = UDC_EP_SEL;
+					UDC_CTRL_REG = UDC_CLR_EP;
+					UDC_CTRL_REG = UDC_SET_FIFO_EN;
+					UDC_EP_NUM_REG = 0;
+					udc->ep0_pending = 0;
+				} /* else:  6 wait states before it'll tx */
+			} else {
+				/* ack status stage of OUT transfer */
+				UDC_EP_NUM_REG = UDC_EP_DIR;
+				if (req)
+					done(ep0, req, 0);
+			}
+			req = 0;
+		} else if (stat & UDC_STALL) {
+			UDC_CTRL_REG = UDC_CLR_HALT;
+			UDC_EP_NUM_REG = UDC_EP_DIR;
+		} else {
+			UDC_EP_NUM_REG = UDC_EP_DIR;
+		}
+	}
+
+	/* OUT == RX from host */
+	if (irq_src & UDC_EP0_RX) {
+		int	stat;
+
+		UDC_IRQ_SRC_REG = UDC_EP0_RX;
+		UDC_EP_NUM_REG = UDC_EP_SEL;
+		stat = UDC_STAT_FLG_REG;
+		if (stat & UDC_ACK) {
+			if (!udc->ep0_in) {
+				stat = 0;
+				/* read next OUT packet of request, maybe
+				 * reactivating the fifo; stall on errors.
+				 */
+				if (!req || (stat = read_fifo(ep0, req)) < 0) {
+					UDC_SYSCON2_REG = UDC_STALL_CMD;
+					udc->ep0_pending = 0;
+					stat = 0;
+				} else if (stat == 0)
+					UDC_CTRL_REG = UDC_SET_FIFO_EN;
+				UDC_EP_NUM_REG = 0;
+
+				/* activate status stage */
+				if (stat == 1) {
+					done(ep0, req, 0);
+					/* that may have STALLed ep0... */
+					UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
+					UDC_CTRL_REG = UDC_CLR_EP;
+					UDC_CTRL_REG = UDC_SET_FIFO_EN;
+					UDC_EP_NUM_REG = UDC_EP_DIR;
+					udc->ep0_pending = 0;
+				}
+			} else {
+				/* ack status stage of IN transfer */
+				UDC_EP_NUM_REG = 0;
+				if (req)
+					done(ep0, req, 0);
+			}
+		} else if (stat & UDC_STALL) {
+			UDC_CTRL_REG = UDC_CLR_HALT;
+			UDC_EP_NUM_REG = 0;
+		} else {
+			UDC_EP_NUM_REG = 0;
+		}
+	}
+
+	/* SETUP starts all control transfers */
+	if (irq_src & UDC_SETUP) {
+		union u {
+			u16			word[4];
+			struct usb_ctrlrequest	r;
+		} u;
+		int			status = -EINVAL;
+		struct omap_ep		*ep;
+
+		/* read the (latest) SETUP message */
+		do {
+			UDC_EP_NUM_REG = UDC_SETUP_SEL;
+			/* two bytes at a time */
+			u.word[0] = UDC_DATA_REG;
+			u.word[1] = UDC_DATA_REG;
+			u.word[2] = UDC_DATA_REG;
+			u.word[3] = UDC_DATA_REG;
+			UDC_EP_NUM_REG = 0;
+		} while (UDC_IRQ_SRC_REG & UDC_SETUP);
+		le16_to_cpus (&u.r.wValue);
+		le16_to_cpus (&u.r.wIndex);
+		le16_to_cpus (&u.r.wLength);
+
+		/* Delegate almost all control requests to the gadget driver,
+		 * except for a handful of ch9 status/feature requests that
+		 * hardware doesn't autodecode _and_ the gadget API hides.
+		 */
+		udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+		udc->ep0_set_config = 0;
+		udc->ep0_pending = 1;
+		ep0->stopped = 0;
+		ep0->ackwait = 0;
+		switch (u.r.bRequest) {
+		case USB_REQ_SET_CONFIGURATION:
+			/* udc needs to know when ep != 0 is valid */
+			if (u.r.bRequestType != USB_RECIP_DEVICE)
+				goto delegate;
+			if (u.r.wLength != 0)
+				goto do_stall;
+			udc->ep0_set_config = 1;
+			udc->ep0_reset_config = (u.r.wValue == 0);
+			VDBG("set config %d\n", u.r.wValue);
+
+			/* update udc NOW since gadget driver may start
+			 * queueing requests immediately; clear config
+			 * later if it fails the request.
+			 */
+			if (udc->ep0_reset_config)
+				UDC_SYSCON2_REG = UDC_CLR_CFG;
+			else
+				UDC_SYSCON2_REG = UDC_DEV_CFG;
+			update_otg(udc);
+			goto delegate;
+		case USB_REQ_CLEAR_FEATURE:
+			/* clear endpoint halt */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (u.r.wValue != USB_ENDPOINT_HALT
+					|| u.r.wLength != 0)
+				goto do_stall;
+			ep = &udc->ep[u.r.wIndex & 0xf];
+			if (ep != ep0) {
+				if (u.r.wIndex & USB_DIR_IN)
+					ep += 16;
+				if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+						|| !ep->desc)
+					goto do_stall;
+				use_ep(ep, 0);
+				UDC_CTRL_REG = UDC_RESET_EP;
+				ep->ackwait = 0;
+				if (!(ep->bEndpointAddress & USB_DIR_IN)) {
+					UDC_CTRL_REG = UDC_SET_FIFO_EN;
+					ep->ackwait = 1 + ep->double_buf;
+				}
+			}
+			VDBG("%s halt cleared by host\n", ep->name);
+			goto ep0out_status_stage;
+		case USB_REQ_SET_FEATURE:
+			/* set endpoint halt */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (u.r.wValue != USB_ENDPOINT_HALT
+					|| u.r.wLength != 0)
+				goto do_stall;
+			ep = &udc->ep[u.r.wIndex & 0xf];
+			if (u.r.wIndex & USB_DIR_IN)
+				ep += 16;
+			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+					|| ep == ep0 || !ep->desc)
+				goto do_stall;
+			if (use_dma && ep->has_dma) {
+				/* this has rude side-effects (aborts) and
+				 * can't really work if DMA-IN is active
+				 */
+				DBG("%s host set_halt, NYET \n", ep->name);
+				goto do_stall;
+			}
+			use_ep(ep, 0);
+			/* can't halt if fifo isn't empty... */
+			UDC_CTRL_REG = UDC_CLR_EP;
+			UDC_CTRL_REG = UDC_SET_HALT;
+			VDBG("%s halted by host\n", ep->name);
+ep0out_status_stage:
+			status = 0;
+			UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
+			UDC_CTRL_REG = UDC_CLR_EP;
+			UDC_CTRL_REG = UDC_SET_FIFO_EN;
+			UDC_EP_NUM_REG = UDC_EP_DIR;
+			udc->ep0_pending = 0;
+			break;
+		case USB_REQ_GET_STATUS:
+			/* return interface status.  if we were pedantic,
+			 * we'd detect non-existent interfaces, and stall.
+			 */
+			if (u.r.bRequestType
+					!= (USB_DIR_IN|USB_RECIP_INTERFACE))
+				goto delegate;
+			/* return two zero bytes */
+			UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
+			UDC_DATA_REG = 0;
+			UDC_CTRL_REG = UDC_SET_FIFO_EN;
+			UDC_EP_NUM_REG = UDC_EP_DIR;
+			status = 0;
+			VDBG("GET_STATUS, interface %d\n", u.r.wIndex);
+			/* next, status stage */
+			break;
+		default:
+delegate:
+			/* activate the ep0out fifo right away */
+			if (!udc->ep0_in && u.r.wLength) {
+				UDC_EP_NUM_REG = 0;
+				UDC_CTRL_REG = UDC_SET_FIFO_EN;
+			}
+
+			/* gadget drivers see class/vendor specific requests,
+			 * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
+			 * and more
+			 */
+			VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
+				u.r.bRequestType, u.r.bRequest,
+				u.r.wValue, u.r.wIndex, u.r.wLength);
+
+			/* The gadget driver may return an error here,
+			 * causing an immediate protocol stall.
+			 *
+			 * Else it must issue a response, either queueing a
+			 * response buffer for the DATA stage, or halting ep0
+			 * (causing a protocol stall, not a real halt).  A
+			 * zero length buffer means no DATA stage.
+			 *
+			 * It's fine to issue that response after the setup()
+			 * call returns, and this IRQ was handled.
+			 */
+			udc->ep0_setup = 1;
+			spin_unlock(&udc->lock);
+			status = udc->driver->setup (&udc->gadget, &u.r);
+			spin_lock(&udc->lock);
+			udc->ep0_setup = 0;
+		}
+
+		if (status < 0) {
+do_stall:
+			VDBG("req %02x.%02x protocol STALL; stat %d\n",
+					u.r.bRequestType, u.r.bRequest, status);
+			if (udc->ep0_set_config) {
+				if (udc->ep0_reset_config)
+					WARN("error resetting config?\n");
+				else
+					UDC_SYSCON2_REG = UDC_CLR_CFG;
+			}
+			UDC_SYSCON2_REG = UDC_STALL_CMD;
+			udc->ep0_pending = 0;
+		}
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)
+
+static void devstate_irq(struct omap_udc *udc, u16 irq_src)
+{
+	u16	devstat, change;
+
+	devstat = UDC_DEVSTAT_REG;
+	change = devstat ^ udc->devstat;
+	udc->devstat = devstat;
+
+	if (change & (UDC_USB_RESET|UDC_ATT)) {
+		udc_quiesce(udc);
+
+		if (change & UDC_ATT) {
+			/* driver for any external transceiver will
+			 * have called omap_vbus_session() already
+			 */
+			if (devstat & UDC_ATT) {
+				udc->gadget.speed = USB_SPEED_FULL;
+				VDBG("connect\n");
+				if (!udc->transceiver)
+					pullup_enable(udc);
+				// if (driver->connect) call it
+			} else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+				udc->gadget.speed = USB_SPEED_UNKNOWN;
+				if (!udc->transceiver)
+					pullup_disable(udc);
+				DBG("disconnect, gadget %s\n",
+					udc->driver->driver.name);
+				if (udc->driver->disconnect) {
+					spin_unlock(&udc->lock);
+					udc->driver->disconnect(&udc->gadget);
+					spin_lock(&udc->lock);
+				}
+			}
+			change &= ~UDC_ATT;
+		}
+
+		if (change & UDC_USB_RESET) {
+			if (devstat & UDC_USB_RESET) {
+				VDBG("RESET=1\n");
+			} else {
+				udc->gadget.speed = USB_SPEED_FULL;
+				INFO("USB reset done, gadget %s\n",
+					udc->driver->driver.name);
+				/* ep0 traffic is legal from now on */
+				UDC_IRQ_EN_REG = UDC_DS_CHG_IE | UDC_EP0_IE;
+			}
+			change &= ~UDC_USB_RESET;
+		}
+	}
+	if (change & UDC_SUS) {
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+			// FIXME tell isp1301 to suspend/resume (?)
+			if (devstat & UDC_SUS) {
+				VDBG("suspend\n");
+				update_otg(udc);
+				/* HNP could be under way already */
+				if (udc->gadget.speed == USB_SPEED_FULL
+						&& udc->driver->suspend) {
+					spin_unlock(&udc->lock);
+					udc->driver->suspend(&udc->gadget);
+					spin_lock(&udc->lock);
+				}
+			} else {
+				VDBG("resume\n");
+				if (udc->gadget.speed == USB_SPEED_FULL
+						&& udc->driver->resume) {
+					spin_unlock(&udc->lock);
+					udc->driver->resume(&udc->gadget);
+					spin_lock(&udc->lock);
+				}
+			}
+		}
+		change &= ~UDC_SUS;
+	}
+	if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
+		update_otg(udc);
+		change &= ~OTG_FLAGS;
+	}
+
+	change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
+	if (change)
+		VDBG("devstat %03x, ignore change %03x\n",
+			devstat,  change);
+
+	UDC_IRQ_SRC_REG = UDC_DS_CHG;
+}
+
+static irqreturn_t
+omap_udc_irq(int irq, void *_udc, struct pt_regs *r)
+{
+	struct omap_udc	*udc = _udc;
+	u16		irq_src;
+	irqreturn_t	status = IRQ_NONE;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	irq_src = UDC_IRQ_SRC_REG;
+
+	/* Device state change (usb ch9 stuff) */
+	if (irq_src & UDC_DS_CHG) {
+		devstate_irq(_udc, irq_src);
+		status = IRQ_HANDLED;
+		irq_src &= ~UDC_DS_CHG;
+	}
+
+	/* EP0 control transfers */
+	if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
+		ep0_irq(_udc, irq_src);
+		status = IRQ_HANDLED;
+		irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
+	}
+
+	/* DMA transfer completion */
+	if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
+		dma_irq(_udc, irq_src);
+		status = IRQ_HANDLED;
+		irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
+	}
+
+	irq_src &= ~(UDC_SOF|UDC_EPN_TX|UDC_EPN_RX);
+	if (irq_src)
+		DBG("udc_irq, unhandled %03x\n", irq_src);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return status;
+}
+
+/* workaround for seemingly-lost IRQs for RX ACKs... */
+#define PIO_OUT_TIMEOUT	(jiffies + HZ/3)
+#define HALF_FULL(f)	(!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
+
+static void pio_out_timer(unsigned long _ep)
+{
+	struct omap_ep	*ep = (void *) _ep;
+	unsigned long	flags;
+	u16		stat_flg;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	if (!list_empty(&ep->queue) && ep->ackwait) {
+		use_ep(ep, 0);
+		stat_flg = UDC_STAT_FLG_REG;
+
+		if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
+				|| (ep->double_buf && HALF_FULL(stat_flg)))) {
+			struct omap_req	*req;
+
+			VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			UDC_EP_NUM_REG = ep->bEndpointAddress | UDC_EP_SEL;
+			(void) read_fifo(ep, req);
+			UDC_EP_NUM_REG = ep->bEndpointAddress;
+			UDC_CTRL_REG = UDC_SET_FIFO_EN;
+			ep->ackwait = 1 + ep->double_buf;
+		}
+	}
+	mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+}
+
+static irqreturn_t
+omap_udc_pio_irq(int irq, void *_dev, struct pt_regs *r)
+{
+	u16		epn_stat, irq_src;
+	irqreturn_t	status = IRQ_NONE;
+	struct omap_ep	*ep;
+	int		epnum;
+	struct omap_udc	*udc = _dev;
+	struct omap_req	*req;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	epn_stat = UDC_EPN_STAT_REG;
+	irq_src = UDC_IRQ_SRC_REG;
+
+	/* handle OUT first, to avoid some wasteful NAKs */
+	if (irq_src & UDC_EPN_RX) {
+		epnum = (epn_stat >> 8) & 0x0f;
+		UDC_IRQ_SRC_REG = UDC_EPN_RX;
+		status = IRQ_HANDLED;
+		ep = &udc->ep[epnum];
+		ep->irqs++;
+
+		UDC_EP_NUM_REG = epnum | UDC_EP_SEL;
+		ep->fnf = 0;
+		if ((UDC_STAT_FLG_REG & UDC_ACK)) {
+			ep->ackwait--;
+			if (!list_empty(&ep->queue)) {
+				int stat;
+				req = container_of(ep->queue.next,
+						struct omap_req, queue);
+				stat = read_fifo(ep, req);
+				if (!ep->double_buf)
+					ep->fnf = 1;
+			}
+		}
+		/* min 6 clock delay before clearing EP_SEL ... */
+		epn_stat = UDC_EPN_STAT_REG;
+		epn_stat = UDC_EPN_STAT_REG;
+		UDC_EP_NUM_REG = epnum;
+
+		/* enabling fifo _after_ clearing ACK, contrary to docs,
+		 * reduces lossage; timer still needed though (sigh).
+		 */
+		if (ep->fnf) {
+			UDC_CTRL_REG = UDC_SET_FIFO_EN;
+			ep->ackwait = 1 + ep->double_buf;
+		}
+		mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
+	}
+
+	/* then IN transfers */
+	else if (irq_src & UDC_EPN_TX) {
+		epnum = epn_stat & 0x0f;
+		UDC_IRQ_SRC_REG = UDC_EPN_TX;
+		status = IRQ_HANDLED;
+		ep = &udc->ep[16 + epnum];
+		ep->irqs++;
+
+		UDC_EP_NUM_REG = epnum | UDC_EP_DIR | UDC_EP_SEL;
+		if ((UDC_STAT_FLG_REG & UDC_ACK)) {
+			ep->ackwait = 0;
+			if (!list_empty(&ep->queue)) {
+				req = container_of(ep->queue.next,
+						struct omap_req, queue);
+				(void) write_fifo(ep, req);
+			}
+		}
+		/* min 6 clock delay before clearing EP_SEL ... */
+		epn_stat = UDC_EPN_STAT_REG;
+		epn_stat = UDC_EPN_STAT_REG;
+		UDC_EP_NUM_REG = epnum | UDC_EP_DIR;
+		/* then 6 clocks before it'd tx */
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return status;
+}
+
+#ifdef	USE_ISO
+static irqreturn_t
+omap_udc_iso_irq(int irq, void *_dev, struct pt_regs *r)
+{
+	struct omap_udc	*udc = _dev;
+	struct omap_ep	*ep;
+	int		pending = 0;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* handle all non-DMA ISO transfers */
+	list_for_each_entry (ep, &udc->iso, iso) {
+		u16		stat;
+		struct omap_req	*req;
+
+		if (ep->has_dma || list_empty(&ep->queue))
+			continue;
+		req = list_entry(ep->queue.next, struct omap_req, queue);
+
+		use_ep(ep, UDC_EP_SEL);
+		stat = UDC_STAT_FLG_REG;
+
+		/* NOTE: like the other controller drivers, this isn't
+		 * currently reporting lost or damaged frames.
+		 */
+		if (ep->bEndpointAddress & USB_DIR_IN) {
+			if (stat & UDC_MISS_IN)
+				/* done(ep, req, -EPROTO) */;
+			else
+				write_fifo(ep, req);
+		} else {
+			int	status = 0;
+
+			if (stat & UDC_NO_RXPACKET)
+				status = -EREMOTEIO;
+			else if (stat & UDC_ISO_ERR)
+				status = -EILSEQ;
+			else if (stat & UDC_DATA_FLUSH)
+				status = -ENOSR;
+
+			if (status)
+				/* done(ep, req, status) */;
+			else
+				read_fifo(ep, req);
+		}
+		deselect_ep();
+		/* 6 wait states before next EP */
+
+		ep->irqs++;
+		if (!list_empty(&ep->queue))
+			pending = 1;
+	}
+	if (!pending)
+		UDC_IRQ_EN_REG &= ~UDC_SOF_IE;
+	UDC_IRQ_SRC_REG = UDC_SOF;
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return IRQ_HANDLED;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static struct omap_udc *udc;
+
+int usb_gadget_register_driver (struct usb_gadget_driver *driver)
+{
+	int		status = -ENODEV;
+	struct omap_ep	*ep;
+	unsigned long	flags;
+
+	/* basic sanity tests */
+	if (!udc)
+		return -ENODEV;
+	if (!driver
+			// FIXME if otg, check:  driver->is_otg
+			|| driver->speed < USB_SPEED_FULL
+			|| !driver->bind
+			|| !driver->unbind
+			|| !driver->setup)
+		return -EINVAL;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->driver) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -EBUSY;
+	}
+
+	/* reset state */
+	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
+		ep->irqs = 0;
+		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+			continue;
+		use_ep(ep, 0);
+		UDC_CTRL_REG = UDC_SET_HALT;
+	}
+	udc->ep0_pending = 0;
+	udc->ep[0].irqs = 0;
+	udc->softconnect = 1;
+
+	/* hook up the driver */
+	driver->driver.bus = 0;
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	status = driver->bind (&udc->gadget);
+	if (status) {
+		DBG("bind to %s --> %d\n", driver->driver.name, status);
+		udc->gadget.dev.driver = 0;
+		udc->driver = 0;
+		goto done;
+	}
+	DBG("bound to driver %s\n", driver->driver.name);
+
+	UDC_IRQ_SRC_REG = UDC_IRQ_SRC_MASK;
+
+	/* connect to bus through transceiver */
+	if (udc->transceiver) {
+		status = otg_set_peripheral(udc->transceiver, &udc->gadget);
+		if (status < 0) {
+			ERR("can't bind to transceiver\n");
+			driver->unbind (&udc->gadget);
+			udc->gadget.dev.driver = 0;
+			udc->driver = 0;
+			goto done;
+		}
+	} else {
+		if (can_pullup(udc))
+			pullup_enable (udc);
+		else
+			pullup_disable (udc);
+	}
+
+	/* boards that don't have VBUS sensing can't autogate 48MHz;
+	 * can't enter deep sleep while a gadget driver is active.
+	 */
+	if (machine_is_omap_innovator() || machine_is_omap_osk())
+		omap_vbus_session(&udc->gadget, 1);
+
+done:
+	return status;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+{
+	unsigned long	flags;
+	int		status = -ENODEV;
+
+	if (!udc)
+		return -ENODEV;
+	if (!driver || driver != udc->driver)
+		return -EINVAL;
+
+	if (machine_is_omap_innovator() || machine_is_omap_osk())
+		omap_vbus_session(&udc->gadget, 0);
+
+	if (udc->transceiver)
+		(void) otg_set_peripheral(udc->transceiver, 0);
+	else
+		pullup_disable(udc);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc_quiesce(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	driver->unbind(&udc->gadget);
+	udc->gadget.dev.driver = 0;
+	udc->driver = 0;
+	status = 0;
+
+	DBG("unregistered driver '%s'\n", driver->driver.name);
+	return status;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char proc_filename[] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
+{
+	u16		stat_flg;
+	struct omap_req	*req;
+	char		buf[20];
+
+	use_ep(ep, 0);
+
+	if (use_dma && ep->has_dma)
+		snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
+			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
+			ep->dma_channel - 1, ep->lch);
+	else
+		buf[0] = 0;
+
+	stat_flg = UDC_STAT_FLG_REG;
+	seq_printf(s,
+		"\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
+		ep->name, buf,
+		ep->double_buf ? "dbuf " : "",
+		({char *s; switch(ep->ackwait){
+		case 0: s = ""; break;
+		case 1: s = "(ackw) "; break;
+		case 2: s = "(ackw2) "; break;
+		default: s = "(?) "; break;
+		} s;}),
+		ep->irqs, stat_flg,
+		(stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
+		(stat_flg & UDC_MISS_IN) ? "miss_in " : "",
+		(stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
+		(stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
+		(stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
+		(stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
+		(stat_flg & UDC_EP_HALTED) ? "HALT " : "",
+		(stat_flg & UDC_STALL) ? "STALL " : "",
+		(stat_flg & UDC_NAK) ? "NAK " : "",
+		(stat_flg & UDC_ACK) ? "ACK " : "",
+		(stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
+		(stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
+		(stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");
+
+	if (list_empty (&ep->queue))
+		seq_printf(s, "\t(queue empty)\n");
+	else
+		list_for_each_entry (req, &ep->queue, queue) {
+			unsigned	length = req->req.actual;
+
+			if (use_dma && buf[0]) {
+				length += ((ep->bEndpointAddress & USB_DIR_IN)
+						? dma_src_len : dma_dest_len)
+					(ep, req->req.dma + length);
+				buf[0] = 0;
+			}
+			seq_printf(s, "\treq %p len %d/%d buf %p\n",
+					&req->req, length,
+					req->req.length, req->req.buf);
+		}
+}
+
+static char *trx_mode(unsigned m, int enabled)
+{
+	switch (m) {
+	case 0:		return enabled ? "*6wire" : "unused";
+	case 1:		return "4wire";
+	case 2:		return "3wire";
+	case 3: 	return "6wire";
+	default:	return "unknown";
+	}
+}
+
+static int proc_otg_show(struct seq_file *s)
+{
+	u32		tmp;
+	u32		trans;
+
+	tmp = OTG_REV_REG;
+	trans = USB_TRANSCEIVER_CTRL_REG;
+	seq_printf(s, "OTG rev %d.%d, transceiver_ctrl %03x\n",
+		tmp >> 4, tmp & 0xf, trans);
+	tmp = OTG_SYSCON_1_REG;
+	seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
+			FOURBITS "\n", tmp,
+		trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
+		trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
+		(USB0_TRX_MODE(tmp) == 0)
+			? "internal"
+			: trx_mode(USB0_TRX_MODE(tmp), 1),
+		(tmp & OTG_IDLE_EN) ? " !otg" : "",
+		(tmp & HST_IDLE_EN) ? " !host" : "",
+		(tmp & DEV_IDLE_EN) ? " !dev" : "",
+		(tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
+	tmp = OTG_SYSCON_2_REG;
+	seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
+			" b_ase_brst=%d hmc=%d\n", tmp,
+		(tmp & OTG_EN) ? " otg_en" : "",
+		(tmp & USBX_SYNCHRO) ? " synchro" : "",
+		// much more SRP stuff
+		(tmp & SRP_DATA) ? " srp_data" : "",
+		(tmp & SRP_VBUS) ? " srp_vbus" : "",
+		(tmp & OTG_PADEN) ? " otg_paden" : "",
+		(tmp & HMC_PADEN) ? " hmc_paden" : "",
+		(tmp & UHOST_EN) ? " uhost_en" : "",
+		(tmp & HMC_TLLSPEED) ? " tllspeed" : "",
+		(tmp & HMC_TLLATTACH) ? " tllattach" : "",
+		B_ASE_BRST(tmp),
+		OTG_HMC(tmp));
+	tmp = OTG_CTRL_REG;
+	seq_printf(s, "otg_ctrl    %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
+		(tmp & OTG_ASESSVLD) ? " asess" : "",
+		(tmp & OTG_BSESSEND) ? " bsess_end" : "",
+		(tmp & OTG_BSESSVLD) ? " bsess" : "",
+		(tmp & OTG_VBUSVLD) ? " vbus" : "",
+		(tmp & OTG_ID) ? " id" : "",
+		(tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
+		(tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
+		(tmp & OTG_A_BUSREQ) ? " a_bus" : "",
+		(tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
+		(tmp & OTG_B_BUSREQ) ? " b_bus" : "",
+		(tmp & OTG_BUSDROP) ? " busdrop" : "",
+		(tmp & OTG_PULLDOWN) ? " down" : "",
+		(tmp & OTG_PULLUP) ? " up" : "",
+		(tmp & OTG_DRV_VBUS) ? " drv" : "",
+		(tmp & OTG_PD_VBUS) ? " pd_vb" : "",
+		(tmp & OTG_PU_VBUS) ? " pu_vb" : "",
+		(tmp & OTG_PU_ID) ? " pu_id" : ""
+		);
+	tmp = OTG_IRQ_EN_REG;
+	seq_printf(s, "otg_irq_en  %04x" "\n", tmp);
+	tmp = OTG_IRQ_SRC_REG;
+	seq_printf(s, "otg_irq_src %04x" "\n", tmp);
+	tmp = OTG_OUTCTRL_REG;
+	seq_printf(s, "otg_outctrl %04x" "\n", tmp);
+	tmp = OTG_TEST_REG;
+	seq_printf(s, "otg_test    %04x" "\n", tmp);
+	return 0;
+}
+
+static int proc_udc_show(struct seq_file *s, void *_)
+{
+	u32		tmp;
+	struct omap_ep	*ep;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	seq_printf(s, "%s, version: " DRIVER_VERSION
+#ifdef	USE_ISO
+		" (iso)"
+#endif
+		"%s\n",
+		driver_desc,
+		use_dma ?  " (dma)" : "");
+
+	tmp = UDC_REV_REG & 0xff; 
+	seq_printf(s,
+		"UDC rev %d.%d, fifo mode %d, gadget %s\n"
+		"hmc %d, transceiver %s\n",
+		tmp >> 4, tmp & 0xf,
+		fifo_mode,
+		udc->driver ? udc->driver->driver.name : "(none)",
+		HMC,
+		udc->transceiver ? udc->transceiver->label : "(none)");
+	seq_printf(s, "ULPD control %04x req %04x status %04x\n",
+		__REG16(ULPD_CLOCK_CTRL),
+		__REG16(ULPD_SOFT_REQ),
+		__REG16(ULPD_STATUS_REQ));
+
+	/* OTG controller registers */
+	if (!cpu_is_omap15xx())
+		proc_otg_show(s);
+
+	tmp = UDC_SYSCON1_REG;
+	seq_printf(s, "\nsyscon1     %04x" EIGHTBITS "\n", tmp,
+		(tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
+		(tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
+		(tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
+		(tmp & UDC_NAK_EN) ? " nak" : "",
+		(tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
+		(tmp & UDC_SELF_PWR) ? " self_pwr" : "",
+		(tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
+		(tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
+	// syscon2 is write-only
+
+	/* UDC controller registers */
+	if (!(tmp & UDC_PULLUP_EN)) {
+		seq_printf(s, "(suspended)\n");
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return 0;
+	}
+
+	tmp = UDC_DEVSTAT_REG;
+	seq_printf(s, "devstat     %04x" EIGHTBITS "%s%s\n", tmp,
+		(tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
+		(tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
+		(tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
+		(tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
+		(tmp & UDC_USB_RESET) ? " usb_reset" : "",
+		(tmp & UDC_SUS) ? " SUS" : "",
+		(tmp & UDC_CFG) ? " CFG" : "",
+		(tmp & UDC_ADD) ? " ADD" : "",
+		(tmp & UDC_DEF) ? " DEF" : "",
+		(tmp & UDC_ATT) ? " ATT" : "");
+	seq_printf(s, "sof         %04x\n", UDC_SOF_REG);
+	tmp = UDC_IRQ_EN_REG;
+	seq_printf(s, "irq_en      %04x" FOURBITS "%s\n", tmp,
+		(tmp & UDC_SOF_IE) ? " sof" : "",
+		(tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
+		(tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
+		(tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
+		(tmp & UDC_EP0_IE) ? " ep0" : "");
+	tmp = UDC_IRQ_SRC_REG;
+	seq_printf(s, "irq_src     %04x" EIGHTBITS "%s%s\n", tmp,
+		(tmp & UDC_TXN_DONE) ? " txn_done" : "",
+		(tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
+		(tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
+		(tmp & UDC_SOF) ? " sof" : "",
+		(tmp & UDC_EPN_RX) ? " epn_rx" : "",
+		(tmp & UDC_EPN_TX) ? " epn_tx" : "",
+		(tmp & UDC_DS_CHG) ? " ds_chg" : "",
+		(tmp & UDC_SETUP) ? " setup" : "",
+		(tmp & UDC_EP0_RX) ? " ep0out" : "",
+		(tmp & UDC_EP0_TX) ? " ep0in" : "");
+	if (use_dma) {
+		unsigned i;
+
+		tmp = UDC_DMA_IRQ_EN_REG;
+		seq_printf(s, "dma_irq_en  %04x%s" EIGHTBITS "\n", tmp,
+			(tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
+			(tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
+			(tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",
+
+			(tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
+			(tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
+			(tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",
+
+			(tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
+			(tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
+			(tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");
+
+		tmp = UDC_RXDMA_CFG_REG;
+		seq_printf(s, "rxdma_cfg   %04x\n", tmp);
+		if (tmp) {
+			for (i = 0; i < 3; i++) {
+				if ((tmp & (0x0f << (i * 4))) == 0)
+					continue;
+				seq_printf(s, "rxdma[%d]    %04x\n", i,
+						UDC_RXDMA_REG(i + 1));
+			}
+		}
+		tmp = UDC_TXDMA_CFG_REG;
+		seq_printf(s, "txdma_cfg   %04x\n", tmp);
+		if (tmp) {
+			for (i = 0; i < 3; i++) {
+				if (!(tmp & (0x0f << (i * 4))))
+					continue;
+				seq_printf(s, "txdma[%d]    %04x\n", i,
+						UDC_TXDMA_REG(i + 1));
+			}
+		}
+	}
+
+	tmp = UDC_DEVSTAT_REG;
+	if (tmp & UDC_ATT) {
+		proc_ep_show(s, &udc->ep[0]);
+		if (tmp & UDC_ADD) {
+			list_for_each_entry (ep, &udc->gadget.ep_list,
+					ep.ep_list) {
+				if (ep->desc)
+					proc_ep_show(s, ep);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int proc_udc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_udc_show, 0);
+}
+
+static struct file_operations proc_ops = {
+	.open		= proc_udc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void create_proc_file(void)
+{
+	struct proc_dir_entry *pde;
+
+	pde = create_proc_entry (proc_filename, 0, NULL);
+	if (pde)
+		pde->proc_fops = &proc_ops;
+}
+
+static void remove_proc_file(void)
+{
+	remove_proc_entry(proc_filename, 0);
+}
+
+#else
+
+static inline void create_proc_file(void) {}
+static inline void remove_proc_file(void) {}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* Before this controller can enumerate, we need to pick an endpoint
+ * configuration, or "fifo_mode".  That involves allocating 2KB of packet
+ * buffer space among the endpoints we'll be operating.
+ */
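+/*
+ * A rough worked example (assuming use_dma on a 16xx part, so bulk
+ * endpoints really are double buffered): fifo_mode 0 ends up with
+ *
+ *	ep0           bytes    8..71    (64, single buffered)
+ *	ep1in-bulk    bytes   72..199   (64 x2)
+ *	ep2out-bulk   bytes  200..327   (64 x2)
+ *	ep3in-int     bytes  328..343   (16)
+ *
+ * leaving 2048 - 344 = 1704 bytes spare, which is what the INFO() line
+ * at the end of omap_udc_setup() reports.
+ */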
+static unsigned __init
+omap_ep_setup(char *name, u8 addr, u8 type,
+		unsigned buf, unsigned maxp, int dbuf)
+{
+	struct omap_ep	*ep;
+	u16		epn_rxtx = 0;
+
+	/* OUT endpoints first, then IN */
+	ep = &udc->ep[addr & 0xf];
+	if (addr & USB_DIR_IN)
+		ep += 16;
+
+	/* in case of ep init table bugs */
+	BUG_ON(ep->name[0]);
+
+	/* chip setup ... bit values are same for IN, OUT */
+	if (type == USB_ENDPOINT_XFER_ISOC) {
+		switch (maxp) {
+		case 8:		epn_rxtx = 0 << 12; break;
+		case 16:	epn_rxtx = 1 << 12; break;
+		case 32:	epn_rxtx = 2 << 12; break;
+		case 64:	epn_rxtx = 3 << 12; break;
+		case 128:	epn_rxtx = 4 << 12; break;
+		case 256:	epn_rxtx = 5 << 12; break;
+		case 512:	epn_rxtx = 6 << 12; break;
+		default:	BUG();
+		}
+		epn_rxtx |= UDC_EPN_RX_ISO;
+		dbuf = 1;
+	} else {
+		/* double-buffering "not supported" on 15xx,
+		 * and ignored for PIO-IN on 16xx
+		 */
+		if (!use_dma || cpu_is_omap15xx())
+			dbuf = 0;
+
+		switch (maxp) {
+		case 8:		epn_rxtx = 0 << 12; break;
+		case 16:	epn_rxtx = 1 << 12; break;
+		case 32:	epn_rxtx = 2 << 12; break;
+		case 64:	epn_rxtx = 3 << 12; break;
+		default:	BUG();
+		}
+		if (dbuf && addr)
+			epn_rxtx |= UDC_EPN_RX_DB;
+		init_timer(&ep->timer);
+		ep->timer.function = pio_out_timer;
+		ep->timer.data = (unsigned long) ep;
+	}
+	if (addr)
+		epn_rxtx |= UDC_EPN_RX_VALID;
+	BUG_ON(buf & 0x07);
+	epn_rxtx |= buf >> 3;
+
+	DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
+		name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);
+
+	if (addr & USB_DIR_IN)
+		UDC_EP_TX_REG(addr & 0xf) = epn_rxtx;
+	else
+		UDC_EP_RX_REG(addr) = epn_rxtx;
+
+	/* next endpoint's buffer starts after this one's */
+	buf += maxp;
+	if (dbuf)
+		buf += maxp;
+	BUG_ON(buf > 2048);
+
+	/* set up driver data structures */
+	BUG_ON(strlen(name) >= sizeof ep->name);
+	strlcpy(ep->name, name, sizeof ep->name);
+	INIT_LIST_HEAD(&ep->queue);
+	INIT_LIST_HEAD(&ep->iso);
+	ep->bEndpointAddress = addr;
+	ep->bmAttributes = type;
+	ep->double_buf = dbuf;
+	ep->udc = udc; 
+
+	ep->ep.name = ep->name;
+	ep->ep.ops = &omap_ep_ops;
+	ep->ep.maxpacket = ep->maxpacket = maxp;
+	list_add_tail (&ep->ep.ep_list, &udc->gadget.ep_list);
+
+	return buf;
+}
+
+static void omap_udc_release(struct device *dev)
+{
+	complete(udc->done);
+	kfree (udc);
+	udc = 0;
+}
+
+static int __init
+omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
+{
+	unsigned	tmp, buf;
+
+	/* abolish any previous hardware state */
+	UDC_SYSCON1_REG = 0;
+	UDC_IRQ_EN_REG = 0;
+	UDC_IRQ_SRC_REG = UDC_IRQ_SRC_MASK;
+	UDC_DMA_IRQ_EN_REG = 0;
+	UDC_RXDMA_CFG_REG = 0;
+	UDC_TXDMA_CFG_REG = 0;
+
+	/* UDC_PULLUP_EN gates the chip clock */
+	// OTG_SYSCON_1_REG |= DEV_IDLE_EN;
+
+	udc = kmalloc (sizeof *udc, SLAB_KERNEL);
+	if (!udc)
+		return -ENOMEM;
+
+	memset(udc, 0, sizeof *udc);
+	spin_lock_init (&udc->lock);
+
+	udc->gadget.ops = &omap_gadget_ops;
+	udc->gadget.ep0 = &udc->ep[0].ep;
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+	INIT_LIST_HEAD(&udc->iso);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->gadget.name = driver_name;
+
+	device_initialize(&udc->gadget.dev);
+	strcpy (udc->gadget.dev.bus_id, "gadget");
+	udc->gadget.dev.release = omap_udc_release;
+	udc->gadget.dev.parent = &odev->dev;
+	if (use_dma)
+		udc->gadget.dev.dma_mask = odev->dev.dma_mask;
+
+	udc->transceiver = xceiv;
+
+	/* ep0 is special; put it right after the SETUP buffer */
+	buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
+			8 /* after SETUP */, 64 /* maxpacket */, 0);
+	list_del_init(&udc->ep[0].ep.ep_list);
+
+	/* initially disable all non-ep0 endpoints */
+	for (tmp = 1; tmp < 15; tmp++) {
+		UDC_EP_RX_REG(tmp) = 0;
+		UDC_EP_TX_REG(tmp) = 0;
+	}
+
+#define OMAP_BULK_EP(name,addr) \
+	buf = omap_ep_setup(name "-bulk", addr, \
+			USB_ENDPOINT_XFER_BULK, buf, 64, 1);
+#define OMAP_INT_EP(name,addr, maxp) \
+	buf = omap_ep_setup(name "-int", addr, \
+			USB_ENDPOINT_XFER_INT, buf, maxp, 0);
+#define OMAP_ISO_EP(name,addr, maxp) \
+	buf = omap_ep_setup(name "-iso", addr, \
+			USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);
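+/*
+ * For reference, OMAP_BULK_EP("ep1in", USB_DIR_IN | 1) expands to
+ *
+ *	buf = omap_ep_setup("ep1in-bulk", USB_DIR_IN | 1,
+ *			USB_ENDPOINT_XFER_BULK, buf, 64, 1);
+ *
+ * so each entry below programs one endpoint and advances 'buf' past the
+ * packet buffer(s) it just claimed.
+ */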
+
+	switch (fifo_mode) {
+	case 0:
+		OMAP_BULK_EP("ep1in",  USB_DIR_IN  | 1);
+		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+		OMAP_INT_EP("ep3in",   USB_DIR_IN  | 3, 16);
+		break;
+	case 1:
+		OMAP_BULK_EP("ep1in",  USB_DIR_IN  | 1);
+		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+		OMAP_BULK_EP("ep3in",  USB_DIR_IN  | 3);
+		OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4);
+
+		OMAP_BULK_EP("ep5in",  USB_DIR_IN  | 5);
+		OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
+		OMAP_BULK_EP("ep6in",  USB_DIR_IN  | 6);
+		OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6);
+
+		OMAP_BULK_EP("ep7in",  USB_DIR_IN  | 7);
+		OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
+		OMAP_BULK_EP("ep8in",  USB_DIR_IN  | 8);
+		OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8);
+
+		OMAP_INT_EP("ep9in",   USB_DIR_IN  | 9, 16);
+		OMAP_INT_EP("ep10out", USB_DIR_IN  | 10, 16);
+		OMAP_INT_EP("ep11in",  USB_DIR_IN  | 9, 16);
+		OMAP_INT_EP("ep12out", USB_DIR_IN  | 10, 16);
+		break;
+
+#ifdef	USE_ISO
+	case 2:			/* mixed iso/bulk */
+		OMAP_ISO_EP("ep1in",   USB_DIR_IN  | 1, 256);
+		OMAP_ISO_EP("ep2out",  USB_DIR_OUT | 2, 256);
+		OMAP_ISO_EP("ep3in",   USB_DIR_IN  | 3, 128);
+		OMAP_ISO_EP("ep4out",  USB_DIR_OUT | 4, 128);
+
+		OMAP_INT_EP("ep5in",   USB_DIR_IN  | 5, 16);
+
+		OMAP_BULK_EP("ep6in",  USB_DIR_IN  | 6);
+		OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
+		OMAP_INT_EP("ep8in",   USB_DIR_IN  | 8, 16);
+		break;
+	case 3:			/* mixed bulk/iso */
+		OMAP_BULK_EP("ep1in",  USB_DIR_IN  | 1);
+		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+		OMAP_INT_EP("ep3in",   USB_DIR_IN  | 3, 16);
+
+		OMAP_BULK_EP("ep4in",  USB_DIR_IN  | 4);
+		OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
+		OMAP_INT_EP("ep6in",   USB_DIR_IN  | 6, 16);
+
+		OMAP_ISO_EP("ep7in",   USB_DIR_IN  | 7, 256);
+		OMAP_ISO_EP("ep8out",  USB_DIR_OUT | 8, 256);
+		OMAP_INT_EP("ep9in",   USB_DIR_IN  | 9, 16);
+		break;
+#endif
+
+	/* add more modes as needed */
+
+	default:
+		ERR("unsupported fifo_mode #%d\n", fifo_mode);
+		return -ENODEV;
+	}
+	UDC_SYSCON1_REG = UDC_CFG_LOCK|UDC_SELF_PWR;
+	INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
+	return 0;
+}
+
+static int __init omap_udc_probe(struct device *dev)
+{
+	struct platform_device	*odev = to_platform_device(dev);
+	int			status = -ENODEV;
+	int			hmc;
+	struct otg_transceiver	*xceiv = 0;
+	const char		*type = 0;
+	struct omap_usb_config	*config = dev->platform_data;
+
+	/* NOTE:  "knows" the order of the resources! */
+	if (!request_mem_region(odev->resource[0].start, 
+			odev->resource[0].end - odev->resource[0].start + 1,
+			driver_name)) {
+		DBG("request_mem_region failed\n");
+		return -EBUSY;
+	}
+
+	INFO("OMAP UDC rev %d.%d%s\n",
+		UDC_REV_REG >> 4, UDC_REV_REG & 0xf,
+		config->otg ? ", Mini-AB" : "");
+
+	/* use the mode given to us by board init code */
+	if (cpu_is_omap15xx()) {
+		hmc = HMC_1510;
+		type = "(unknown)";
+
+		if (machine_is_omap_innovator()) {
+			/* just set up software VBUS detect, and then
+			 * later rig it so we always report VBUS.
+			 * FIXME without really sensing VBUS, we can't
+			 * know when to turn PULLUP_EN on/off; and that
+			 * means we always "need" the 48MHz clock.
+			 */
+			u32 tmp = FUNC_MUX_CTRL_0_REG;
+
+			FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
+			tmp |= VBUS_MODE_1510;
+			tmp &= ~VBUS_CTRL_1510;
+			FUNC_MUX_CTRL_0_REG = tmp;
+		}
+	} else {
+		hmc = HMC_1610;
+		switch (hmc) {
+		case 3:
+		case 11:
+		case 16:
+		case 19:
+		case 25:
+			xceiv = otg_get_transceiver();
+			if (!xceiv) {
+				DBG("external transceiver not registered!\n");
+				if (config->otg)
+					goto cleanup0;
+				type = "(unknown external)";
+			} else
+				type = xceiv->label;
+			break;
+		case 0:			/* POWERUP DEFAULT == 0 */
+		case 4:
+		case 12:
+		case 20:
+			type = "INTEGRATED";
+			break;
+		case 21:			/* internal loopback */
+			type = "(loopback)";
+			break;
+		case 14:			/* transceiverless */
+			type = "(none)";
+			break;
+
+		default:
+			ERR("unrecognized UDC HMC mode %d\n", hmc);
+			goto cleanup0;
+		}
+	}
+	INFO("hmc mode %d, transceiver %s\n", hmc, type);
+
+	/* a "gadget" abstracts/virtualizes the controller */
+	status = omap_udc_setup(odev, xceiv);
+	if (status) {
+		goto cleanup0;
+	}
+	xceiv = 0;
+	// "udc" is now valid
+	pullup_disable(udc);
+#if	defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+	udc->gadget.is_otg = (config->otg != 0);
+#endif
+
+	/* USB general purpose IRQ:  ep0, state changes, dma, etc */
+	status = request_irq(odev->resource[1].start, omap_udc_irq,
+			SA_SAMPLE_RANDOM, driver_name, udc);
+	if (status != 0) {
+		ERR( "can't get irq %ld, err %d\n",
+			odev->resource[1].start, status);
+		goto cleanup1;
+	}
+
+	/* USB "non-iso" IRQ (PIO for all but ep0) */
+	status = request_irq(odev->resource[2].start, omap_udc_pio_irq,
+			SA_SAMPLE_RANDOM, "omap_udc pio", udc);
+	if (status != 0) {
+		ERR( "can't get irq %ld, err %d\n",
+			odev->resource[2].start, status);
+		goto cleanup2;
+	}
+#ifdef	USE_ISO
+	status = request_irq(odev->resource[3].start, omap_udc_iso_irq,
+			SA_INTERRUPT, "omap_udc iso", udc);
+	if (status != 0) {
+		ERR("can't get irq %ld, err %d\n",
+			odev->resource[3].start, status);
+		goto cleanup3;
+	}
+#endif
+
+	create_proc_file();
+	device_add(&udc->gadget.dev);
+	return 0;
+
+#ifdef	USE_ISO
+cleanup3:
+	free_irq(odev->resource[2].start, udc);
+#endif
+
+cleanup2:
+	free_irq(odev->resource[1].start, udc);
+
+cleanup1:
+	kfree (udc);
+	udc = 0;
+
+cleanup0:
+	if (xceiv)
+		put_device(xceiv->dev);
+	release_mem_region(odev->resource[0].start,
+			odev->resource[0].end - odev->resource[0].start + 1);
+	return status;
+}
+
+static int __exit omap_udc_remove(struct device *dev)
+{
+	struct platform_device	*odev = to_platform_device(dev);
+	DECLARE_COMPLETION(done);
+
+	if (!udc)
+		return -ENODEV;
+
+	udc->done = &done;
+
+	pullup_disable(udc);
+	if (udc->transceiver) {
+		put_device(udc->transceiver->dev);
+		udc->transceiver = 0;
+	}
+	UDC_SYSCON1_REG = 0;
+
+	remove_proc_file();
+
+#ifdef	USE_ISO
+	free_irq(odev->resource[3].start, udc);
+#endif
+	free_irq(odev->resource[2].start, udc);
+	free_irq(odev->resource[1].start, udc);
+
+	release_mem_region(odev->resource[0].start,
+			odev->resource[0].end - odev->resource[0].start + 1);
+
+	device_unregister(&udc->gadget.dev);
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+/* suspend/resume/wakeup from sysfs (echo > power/state) */
+
+static int omap_udc_suspend(struct device *dev, u32 state, u32 level)
+{
+	if (level != 0)
+		return 0;
+
+	DBG("suspend, state %d\n", state);
+	omap_pullup(&udc->gadget, 0);
+	udc->gadget.dev.power.power_state = 3;
+	udc->gadget.dev.parent->power.power_state = 3;
+	return 0;
+}
+
+static int omap_udc_resume(struct device *dev, u32 level)
+{
+	if (level != 0)
+		return 0;
+
+	DBG("resume + wakeup/SRP\n");
+	udc->gadget.dev.parent->power.power_state = 0;
+	udc->gadget.dev.power.power_state = 0;
+	omap_pullup(&udc->gadget, 1);
+
+	/* maybe the host would enumerate us if we nudged it */
+	msleep(100);
+	return omap_wakeup(&udc->gadget);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct device_driver udc_driver = {
+	.name		= (char *) driver_name,
+	.bus		= &platform_bus_type,
+	.probe		= omap_udc_probe,
+	.remove		= __exit_p(omap_udc_remove),
+	.suspend	= omap_udc_suspend,
+	.resume		= omap_udc_resume,
+};
+
+static int __init udc_init(void)
+{
+	INFO("%s, version: " DRIVER_VERSION
+#ifdef	USE_ISO
+		" (iso)"
+#endif
+		"%s\n", driver_desc,
+		use_dma ?  " (dma)" : "");
+	return driver_register(&udc_driver);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h
new file mode 100644
index 0000000..c9e6854
--- /dev/null
+++ b/drivers/usb/gadget/omap_udc.h
@@ -0,0 +1,208 @@
+/*
+ * omap_udc.h -- for omap 3.2 udc, with OTG support
+ *
+ * 2004 (C) Texas Instruments, Inc.
+ * 2004 (C) David Brownell
+ */
+
+/*
+ * USB device/endpoint management registers
+ */
+#define UDC_REG(offset)              __REG16(UDC_BASE + (offset))
+
+#define	UDC_REV_REG			UDC_REG(0x0)	/* Revision */
+#define	UDC_EP_NUM_REG			UDC_REG(0x4)	/* Which endpoint */
+#	define	UDC_SETUP_SEL		(1 << 6)
+#	define	UDC_EP_SEL		(1 << 5)
+#	define	UDC_EP_DIR		(1 << 4)
+	/* low 4 bits for endpoint number */
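+	/*
+	 * Typical use (as in the PIO irq handlers): select endpoint 2's OUT
+	 * FIFO for CPU access, drain it, then deselect:
+	 *
+	 *	UDC_EP_NUM_REG = 2 | UDC_EP_SEL;
+	 *	... read UDC_RXFSTAT_REG / UDC_DATA_REG ...
+	 *	UDC_EP_NUM_REG = 2;
+	 *
+	 * IN endpoints OR in UDC_EP_DIR as well.
+	 */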
+#define	UDC_DATA_REG			UDC_REG(0x08)	/* Endpoint FIFO */
+#define	UDC_CTRL_REG			UDC_REG(0x0C)	/* Endpoint control */
+#	define	UDC_CLR_HALT		(1 << 7)
+#	define	UDC_SET_HALT		(1 << 6)
+#	define	UDC_SET_FIFO_EN		(1 << 2)
+#	define	UDC_CLR_EP		(1 << 1)
+#	define	UDC_RESET_EP		(1 << 0)
+#define	UDC_STAT_FLG_REG		UDC_REG(0x10)	/* Endpoint status */
+#	define	UDC_NO_RXPACKET		(1 << 15)
+#	define	UDC_MISS_IN		(1 << 14)
+#	define	UDC_DATA_FLUSH		(1 << 13)
+#	define	UDC_ISO_ERR		(1 << 12)
+#	define	UDC_ISO_FIFO_EMPTY	(1 << 9)
+#	define	UDC_ISO_FIFO_FULL	(1 << 8)
+#	define	UDC_EP_HALTED		(1 << 6)
+#	define	UDC_STALL		(1 << 5)
+#	define	UDC_NAK			(1 << 4)
+#	define	UDC_ACK			(1 << 3)
+#	define	UDC_FIFO_EN		(1 << 2)
+#	define	UDC_NON_ISO_FIFO_EMPTY	(1 << 1)
+#	define	UDC_NON_ISO_FIFO_FULL	(1 << 0)
+#define	UDC_RXFSTAT_REG			UDC_REG(0x14)	/* OUT bytecount */
+#define	UDC_SYSCON1_REG			UDC_REG(0x18)	/* System config 1 */
+#	define	UDC_CFG_LOCK		(1 << 8)
+#	define	UDC_DATA_ENDIAN		(1 << 7)
+#	define	UDC_DMA_ENDIAN		(1 << 6)
+#	define	UDC_NAK_EN		(1 << 4)
+#	define	UDC_AUTODECODE_DIS	(1 << 3)
+#	define	UDC_SELF_PWR		(1 << 2)
+#	define	UDC_SOFF_DIS		(1 << 1)
+#	define	UDC_PULLUP_EN		(1 << 0)
+#define	UDC_SYSCON2_REG			UDC_REG(0x1C)	/* System config 2 */
+#	define	UDC_RMT_WKP		(1 << 6)
+#	define	UDC_STALL_CMD		(1 << 5)
+#	define	UDC_DEV_CFG		(1 << 3)
+#	define	UDC_CLR_CFG		(1 << 2)
+#define	UDC_DEVSTAT_REG			UDC_REG(0x20)	/* Device status */
+#	define	UDC_B_HNP_ENABLE	(1 << 9)
+#	define	UDC_A_HNP_SUPPORT	(1 << 8)
+#	define	UDC_A_ALT_HNP_SUPPORT	(1 << 7)
+#	define	UDC_R_WK_OK		(1 << 6)
+#	define	UDC_USB_RESET		(1 << 5)
+#	define	UDC_SUS			(1 << 4)
+#	define	UDC_CFG			(1 << 3)
+#	define	UDC_ADD			(1 << 2)
+#	define	UDC_DEF			(1 << 1)
+#	define	UDC_ATT			(1 << 0)
+#define	UDC_SOF_REG			UDC_REG(0x24)	/* Start of frame */
+#	define	UDC_FT_LOCK		(1 << 12)
+#	define	UDC_TS_OK		(1 << 11)
+#	define	UDC_TS			0x03ff
+#define	UDC_IRQ_EN_REG			UDC_REG(0x28)	/* Interrupt enable */
+#	define	UDC_SOF_IE		(1 << 7)
+#	define	UDC_EPN_RX_IE		(1 << 5)
+#	define	UDC_EPN_TX_IE		(1 << 4)
+#	define	UDC_DS_CHG_IE		(1 << 3)
+#	define	UDC_EP0_IE		(1 << 0)
+#define	UDC_DMA_IRQ_EN_REG		UDC_REG(0x2C)	/* DMA irq enable */
+	/* rx/tx dma channels numbered 1-3 not 0-2 */
+#	define	UDC_TX_DONE_IE(n)	(1 << (4 * (n) - 2))
+#	define	UDC_RX_CNT_IE(n)	(1 << (4 * (n) - 3))
+#	define	UDC_RX_EOT_IE(n)	(1 << (4 * (n) - 4))
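+	/*
+	 * Worked out, the channel bits land at:
+	 *	chan 1:  TX_DONE bit 2,  RX_CNT bit 1,  RX_EOT bit 0
+	 *	chan 2:  TX_DONE bit 6,  RX_CNT bit 5,  RX_EOT bit 4
+	 *	chan 3:  TX_DONE bit 10, RX_CNT bit 9,  RX_EOT bit 8
+	 * (proc_udc_show() prints these with zero-based names, tx0..tx2.)
+	 */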
+#define	UDC_IRQ_SRC_REG			UDC_REG(0x30)	/* Interrupt source */
+#	define	UDC_TXN_DONE		(1 << 10)
+#	define	UDC_RXN_CNT		(1 << 9)
+#	define	UDC_RXN_EOT		(1 << 8)
+#	define	UDC_SOF			(1 << 7)
+#	define	UDC_EPN_RX		(1 << 5)
+#	define	UDC_EPN_TX		(1 << 4)
+#	define	UDC_DS_CHG		(1 << 3)
+#	define	UDC_SETUP		(1 << 2)
+#	define	UDC_EP0_RX		(1 << 1)
+#	define	UDC_EP0_TX		(1 << 0)
+#	define	UDC_IRQ_SRC_MASK	0x7bf
+#define	UDC_EPN_STAT_REG		UDC_REG(0x34)	/* EP irq status */
+#define	UDC_DMAN_STAT_REG		UDC_REG(0x38)	/* DMA irq status */
+#	define	UDC_DMA_RX_SB		(1 << 12)
+#	define	UDC_DMA_RX_SRC(x)	(((x)>>8) & 0xf)
+#	define	UDC_DMA_TX_SRC(x)	(((x)>>0) & 0xf)
+
+
+/* DMA configuration registers:  up to three channels in each direction.  */
+#define	UDC_RXDMA_CFG_REG		UDC_REG(0x40)	/* 3 eps for RX DMA */
+#define	UDC_TXDMA_CFG_REG		UDC_REG(0x44)	/* 3 eps for TX DMA */
+#define	UDC_DATA_DMA_REG		UDC_REG(0x48)	/* rx/tx fifo addr */
+
+/* rx/tx dma control, numbering channels 1-3 not 0-2 */
+#define	UDC_TXDMA_REG(chan)		UDC_REG(0x50 - 4 + 4 * (chan))
+#	define UDC_TXN_EOT		(1 << 15)	/* bytes vs packets */
+#	define UDC_TXN_START		(1 << 14)	/* start transfer */
+#	define UDC_TXN_TSC		0x03ff		/* units in xfer */
+#define	UDC_RXDMA_REG(chan)		UDC_REG(0x60 - 4 + 4 * (chan))
+#	define UDC_RXN_STOP		(1 << 15)	/* enable EOT irq */
+#	define UDC_RXN_TC		0x00ff		/* packets in xfer */
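+/*
+ * So UDC_TXDMA_REG(1..3) sit at offsets 0x50/0x54/0x58 and
+ * UDC_RXDMA_REG(1..3) at 0x60/0x64/0x68.
+ */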
+
+
+/*
+ * Endpoint configuration registers (used before CFG_LOCK is set)
+ * UDC_EP_TX_REG(0) is unused
+ */
+#define	UDC_EP_RX_REG(endpoint)		UDC_REG(0x80 + (endpoint)*4)
+#	define	UDC_EPN_RX_VALID	(1 << 15)
+#	define	UDC_EPN_RX_DB		(1 << 14)
+	/* buffer size in bits 13, 12 */
+#	define	UDC_EPN_RX_ISO		(1 << 11)
+	/* buffer pointer in low 11 bits */
+#define	UDC_EP_TX_REG(endpoint)		UDC_REG(0xc0 + (endpoint)*4)
+	/* same bitfields as in RX_REG */
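+/*
+ * Example encoding, mirroring what omap_ep_setup() computes: a valid,
+ * double-buffered 64-byte bulk OUT endpoint whose packet buffer starts at
+ * byte 0x100 of the 2KB RAM would be programmed as
+ *
+ *	UDC_EP_RX_REG(n) = UDC_EPN_RX_VALID | UDC_EPN_RX_DB
+ *			| (3 << 12)		-- 64 byte buffer size
+ *			| (0x100 >> 3);		-- buffer pointer, 8-byte units
+ */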
+
+/*-------------------------------------------------------------------------*/
+
+struct omap_req {
+	struct usb_request		req;
+	struct list_head		queue;
+	unsigned			dma_bytes;
+	unsigned			mapped:1;
+};
+
+struct omap_ep {
+	struct usb_ep			ep;
+	struct list_head		queue;
+	unsigned long			irqs;
+	struct list_head		iso;
+	const struct usb_endpoint_descriptor	*desc;
+	char				name[14];
+	u16				maxpacket;
+	u8				bEndpointAddress;
+	u8				bmAttributes;
+	unsigned			double_buf:1;
+	unsigned			stopped:1;
+	unsigned			fnf:1;
+	unsigned			has_dma:1;
+	u8				ackwait;
+	u8				dma_channel;
+	u16				dma_counter;
+	int				lch;
+	struct omap_udc			*udc;
+	struct timer_list		timer;
+};
+
+struct omap_udc {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	spinlock_t			lock;
+	struct omap_ep			ep[32];
+	u16				devstat;
+	struct otg_transceiver		*transceiver;
+	struct list_head		iso;
+	unsigned			softconnect:1;
+	unsigned			vbus_active:1;
+	unsigned			ep0_pending:1;
+	unsigned			ep0_in:1;
+	unsigned			ep0_set_config:1;
+	unsigned			ep0_reset_config:1;
+	unsigned			ep0_setup:1;
+
+	struct completion		*done;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef DEBUG
+#define DBG(stuff...)		printk(KERN_DEBUG "udc: " stuff)
+#else
+#define DBG(stuff...)		do{}while(0)
+#endif
+
+#ifdef VERBOSE
+#    define VDBG		DBG
+#else
+#    define VDBG(stuff...)	do{}while(0)
+#endif
+
+#define ERR(stuff...)		printk(KERN_ERR "udc: " stuff)
+#define WARN(stuff...)		printk(KERN_WARNING "udc: " stuff)
+#define INFO(stuff...)		printk(KERN_INFO "udc: " stuff)
+
+/*-------------------------------------------------------------------------*/
+
+#define	MOD_CONF_CTRL_0_REG	__REG32(MOD_CONF_CTRL_0)
+#define	VBUS_W2FC_1510		(1 << 17)	/* 0 gpio0, 1 dvdd2 pin */
+
+#define	FUNC_MUX_CTRL_0_REG	__REG32(FUNC_MUX_CTRL_0)
+#define	VBUS_CTRL_1510		(1 << 19)	/* 1 connected (software) */
+#define	VBUS_MODE_1510		(1 << 18)	/* 0 hardware, 1 software */
+
+#define	HMC_1510	((MOD_CONF_CTRL_0_REG >> 1) & 0x3f)
+#define	HMC_1610	(OTG_SYSCON_2_REG & 0x3f)
+#define	HMC		(cpu_is_omap15xx() ? HMC_1510 : HMC_1610)
+
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
new file mode 100644
index 0000000..6390c57
--- /dev/null
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -0,0 +1,2648 @@
+/*
+ * linux/drivers/usb/gadget/pxa2xx_udc.c
+ * Intel PXA2xx and IXP4xx on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
+ * Copyright (C) 2003 Robert Schwebel, Pengutronix
+ * Copyright (C) 2003 Benedikt Spranger, Pengutronix
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003 Joshua Wise
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#undef	DEBUG
+// #define	VERBOSE	DBG_VERBOSE
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <asm/unaligned.h>
+#include <asm/hardware.h>
+#include <asm/arch/pxa-regs.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include <asm/arch/udc.h>
+
+
+/*
+ * This driver handles the USB Device Controller (UDC) in Intel's PXA 2xx
+ * series processors.  The UDC for the IXP 4xx series is very similar.
+ * There are fifteen endpoints, in addition to ep0.
+ *
+ * Such controller drivers work with a gadget driver.  The gadget driver
+ * returns descriptors, implements configuration and data protocols used
+ * by the host to interact with this device, and allocates endpoints to
+ * the different protocol interfaces.  The controller driver virtualizes
+ * usb hardware so that the gadget drivers will be more portable.
+ * 
+ * This UDC hardware wants to implement a bit too much USB protocol, so
+ * it constrains the sorts of USB configuration change events that work.
+ * The errata for these chips are misleading; some "fixed" bugs from
+ * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
+ */
+
+#define	DRIVER_VERSION	"14-Dec-2003"
+#define	DRIVER_DESC	"PXA 2xx USB Device Controller driver"
+
+
+static const char driver_name [] = "pxa2xx_udc";
+
+static const char ep0name [] = "ep0";
+
+
+// #define	USE_DMA
+// #define	USE_OUT_DMA
+// #define	DISABLE_TEST_MODE
+
+#ifdef CONFIG_ARCH_IXP4XX
+#undef USE_DMA
+
+/* cpu-specific register addresses are compiled in to this code */
+#ifdef CONFIG_ARCH_PXA
+#error "Can't configure both IXP and PXA"
+#endif
+
+#endif
+
+#include "pxa2xx_udc.h"
+
+
+#ifdef	USE_DMA
+static int use_dma = 1;
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC (use_dma, "true to use dma");
+
+static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
+static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
+
+#ifdef USE_OUT_DMA
+#define	DMASTR " (dma support)"
+#else
+#define	DMASTR " (dma in)"
+#endif
+
+#else	/* !USE_DMA */
+#define	DMASTR " (pio only)"
+#undef	USE_OUT_DMA
+#endif
+
+#ifdef	CONFIG_USB_PXA2XX_SMALL
+#define SIZE_STR	" (small)"
+#else
+#define SIZE_STR	""
+#endif
+
+#ifdef DISABLE_TEST_MODE
+/* (mode == 0) == no undocumented chip tweaks
+ * (mode & 1)  == double buffer bulk IN
+ * (mode & 2)  == double buffer bulk OUT
+ * ... so mode = 3 (or 7, 15, etc) does it for both
+ */
+static ushort fifo_mode = 0;
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
+#endif
+
+/* ---------------------------------------------------------------------------
+ * 	endpoint related parts of the api to the usb controller hardware,
+ *	used by gadget driver; and the inner talker-to-hardware core.
+ * ---------------------------------------------------------------------------
+ */
+
+static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
+static void nuke (struct pxa2xx_ep *, int status);
+
+static void pio_irq_enable(int bEndpointAddress)
+{
+	bEndpointAddress &= 0xf;
+	if (bEndpointAddress < 8)
+		UICR0 &= ~(1 << bEndpointAddress);
+	else {
+		bEndpointAddress -= 8;
+		UICR1 &= ~(1 << bEndpointAddress);
+	}
+}
+
+static void pio_irq_disable(int bEndpointAddress)
+{
+	bEndpointAddress &= 0xf;
+	if (bEndpointAddress < 8)
+		UICR0 |= 1 << bEndpointAddress;
+	else {
+		bEndpointAddress -= 8;
+		UICR1 |= 1 << bEndpointAddress;
+	}
+}
+
+/* The UDCCR reg contains mask and interrupt status bits,
+ * so using '|=' isn't safe as it may ack an interrupt.
+ */
+#define UDCCR_MASK_BITS         (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
+
+static inline void udc_set_mask_UDCCR(int mask)
+{
+	UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
+}
+
+static inline void udc_clear_mask_UDCCR(int mask)
+{
+	UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
+}
+
+static inline void udc_ack_int_UDCCR(int mask)
+{
+	/* udccr contains the bits we don't want to change */
+	__u32 udccr = UDCCR & UDCCR_MASK_BITS;
+
+	UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
+}
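+/*
+ * Illustrative use, not tied to any single call site:  turn the UDC on
+ * without acknowledging whatever interrupt bits happen to be pending, then
+ * unmask the reset and suspend/resume sources separately:
+ *
+ *	udc_set_mask_UDCCR(UDCCR_UDE);
+ *	udc_clear_mask_UDCCR(UDCCR_REM | UDCCR_SRM);
+ */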
+
+/*
+ * endpoint enable/disable
+ *
+ * we need to verify the descriptors used to enable endpoints.  since pxa2xx
+ * endpoint configurations are fixed, and are pretty much always enabled,
+ * there's not a lot to manage here.
+ *
+ * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints
+ * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
+ * for a single interface (with only the default altsetting) and for gadget
+ * drivers that don't halt endpoints (not reset by set_interface).  that also
+ * means that if you use ISO, you must violate the USB spec rule that all
+ * iso endpoints must be in non-default altsettings.
+ */
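+/* A minimal sketch (hypothetical gadget-side values) of a descriptor that
+ * passes the checks in pxa2xx_ep_enable() for "ep1in-bulk":  the address
+ * must match, the type must be bulk, and wMaxPacketSize must equal
+ * BULK_FIFO_SIZE:
+ *
+ *	static struct usb_endpoint_descriptor bulk_in_desc = {
+ *		.bLength		= USB_DT_ENDPOINT_SIZE,
+ *		.bDescriptorType	= USB_DT_ENDPOINT,
+ *		.bEndpointAddress	= USB_DIR_IN | 1,
+ *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+ *		.wMaxPacketSize		= __constant_cpu_to_le16(BULK_FIFO_SIZE),
+ *	};
+ */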
+static int pxa2xx_ep_enable (struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct pxa2xx_ep        *ep;
+	struct pxa2xx_udc       *dev;
+
+	ep = container_of (_ep, struct pxa2xx_ep, ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT
+			|| ep->bEndpointAddress != desc->bEndpointAddress
+			|| ep->fifo_size < le16_to_cpu
+						(desc->wMaxPacketSize)) {
+		DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* xfer types must match, except that interrupt ~= bulk */
+	if (ep->bmAttributes != desc->bmAttributes
+			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+		DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
+		return -EINVAL;
+	}
+
+	/* hardware _could_ do smaller, but driver doesn't */
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+				&& le16_to_cpu (desc->wMaxPacketSize)
+						!= BULK_FIFO_SIZE)
+			|| !desc->wMaxPacketSize) {
+		DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
+		return -ERANGE;
+	}
+
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
+		DMSG("%s, bogus device state\n", __FUNCTION__);
+		return -ESHUTDOWN;
+	}
+
+	ep->desc = desc;
+	ep->dma = -1;
+	ep->stopped = 0;
+	ep->pio_irqs = ep->dma_irqs = 0;
+	ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
+
+	/* flush fifo (mostly for OUT buffers) */
+	pxa2xx_ep_fifo_flush (_ep);
+
+	/* ... reset halt state too, if we could ... */
+
+#ifdef	USE_DMA
+	/* for (some) bulk and ISO endpoints, try to get a DMA channel and
+	 * bind it to the endpoint.  otherwise use PIO. 
+	 */
+	switch (ep->bmAttributes) {
+	case USB_ENDPOINT_XFER_ISOC:
+		if (le16_to_cpu(desc->wMaxPacketSize) % 32)
+			break;
+		// fall through
+	case USB_ENDPOINT_XFER_BULK:
+		if (!use_dma || !ep->reg_drcmr)
+			break;
+		ep->dma = pxa_request_dma ((char *)_ep->name,
+ 				(le16_to_cpu (desc->wMaxPacketSize) > 64)
+					? DMA_PRIO_MEDIUM /* some iso */
+					: DMA_PRIO_LOW,
+				dma_nodesc_handler, ep);
+		if (ep->dma >= 0) {
+			*ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
+			DMSG("%s using dma%d\n", _ep->name, ep->dma);
+		}
+	}
+#endif
+
+	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
+	return 0;
+}
+
+static int pxa2xx_ep_disable (struct usb_ep *_ep)
+{
+	struct pxa2xx_ep	*ep;
+
+	ep = container_of (_ep, struct pxa2xx_ep, ep);
+	if (!_ep || !ep->desc) {
+		DMSG("%s, %s not enabled\n", __FUNCTION__,
+			_ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+	nuke (ep, -ESHUTDOWN);
+
+#ifdef	USE_DMA
+	if (ep->dma >= 0) {
+		*ep->reg_drcmr = 0;
+		pxa_free_dma (ep->dma);
+		ep->dma = -1;
+	}
+#endif
+
+	/* flush fifo (mostly for IN buffers) */
+	pxa2xx_ep_fifo_flush (_ep);
+
+	ep->desc = NULL;
+	ep->stopped = 1;
+
+	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* for the pxa2xx, these can just wrap kmalloc/kfree.  gadget drivers
+ * must still pass correctly initialized endpoints, since other controller
+ * drivers may care about how it's currently set up (dma issues etc).
+ */
+
+/*
+ * 	pxa2xx_ep_alloc_request - allocate a request data structure
+ */
+static struct usb_request *
+pxa2xx_ep_alloc_request (struct usb_ep *_ep, int gfp_flags)
+{
+	struct pxa2xx_request *req;
+
+	req = kmalloc (sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	memset (req, 0, sizeof *req);
+	INIT_LIST_HEAD (&req->queue);
+	return &req->req;
+}
+
+
+/*
+ * 	pxa2xx_ep_free_request - deallocate a request data structure
+ */
+static void
+pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct pxa2xx_request	*req;
+
+	req = container_of (_req, struct pxa2xx_request, req);
+	WARN_ON (!list_empty (&req->queue));
+	kfree(req);
+}
+
+
+/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
+ * no device-affinity and the heap works perfectly well for i/o buffers.
+ * It wastes much less memory than dma_alloc_coherent() would, and even
+ * prevents cacheline (32 bytes wide) sharing problems.
+ */
+static void *
+pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
+	dma_addr_t *dma, int gfp_flags)
+{
+	char			*retval;
+
+	retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
+	if (retval)
+#ifdef	USE_DMA
+		*dma = virt_to_bus (retval);
+#else
+		*dma = (dma_addr_t)~0;
+#endif
+	return retval;
+}
+
+static void
+pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
+		unsigned bytes)
+{
+	kfree (buf);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ *	done - retire a request; caller blocked irqs
+ */
+static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
+{
+	unsigned		stopped = ep->stopped;
+
+	list_del_init(&req->queue);
+
+	if (likely (req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (status && status != -ESHUTDOWN)
+		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	req->req.complete(&ep->ep, &req->req);
+	ep->stopped = stopped;
+}
+
+
+static inline void ep0_idle (struct pxa2xx_udc *dev)
+{
+	dev->ep0state = EP0_IDLE;
+}
+
+static int
+write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
+{
+	u8		*buf;
+	unsigned	length, count;
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	/* how big will this packet be? */
+	length = min(req->req.length - req->req.actual, max);
+	req->req.actual += length;
+
+	count = length;
+	while (likely(count--))
+		*uddr = *buf++;
+
+	return length;
+}
+
+/*
+ * write to an IN endpoint fifo, as many packets as possible.
+ * irqs will use this to write the rest later.
+ * caller guarantees at least one packet buffer is ready (or a zlp).
+ */
+static int
+write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
+{
+	unsigned		max;
+
+	max = le16_to_cpu(ep->desc->wMaxPacketSize);
+	do {
+		unsigned	count;
+		int		is_last, is_short;
+
+		count = write_packet(ep->reg_uddr, req, max);
+
+		/* last packet is usually short (or a zlp) */
+		if (unlikely (count != max))
+			is_last = is_short = 1;
+		else {
+			if (likely(req->req.length != req->req.actual)
+					|| req->req.zero)
+				is_last = 0;
+			else
+				is_last = 1;
+			/* interrupt/iso maxpacket may not fill the fifo */
+			is_short = unlikely (max < ep->fifo_size);
+		}
+
+		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
+			ep->ep.name, count,
+			is_last ? "/L" : "", is_short ? "/S" : "",
+			req->req.length - req->req.actual, req);
+
+		/* let loose that packet. maybe try writing another one,
+		 * double buffering might work.  TSP, TPC, and TFS
+		 * bit values are the same for all normal IN endpoints.
+		 */
+		*ep->reg_udccs = UDCCS_BI_TPC;
+		if (is_short)
+			*ep->reg_udccs = UDCCS_BI_TSP;
+
+		/* requests complete when all IN data is in the FIFO */
+		if (is_last) {
+			done (ep, req, 0);
+			if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
+				pio_irq_disable (ep->bEndpointAddress);
+#ifdef USE_DMA
+				/* unaligned data and zlps couldn't use dma */
+				if (unlikely(!list_empty(&ep->queue))) {
+					req = list_entry(ep->queue.next,
+						struct pxa2xx_request, queue);
+					kick_dma(ep,req);
+					return 0;
+				}
+#endif
+			}
+			return 1;
+		}
+
+		// TODO experiment: how robust can fifo mode tweaking be?
+		// double buffering is off in the default fifo mode, which
+		// prevents TFS from being set here.
+
+	} while (*ep->reg_udccs & UDCCS_BI_TFS);
+	return 0;
+}
+
+/* caller asserts req->pending (ep0 irq status not yet cleared); starts
+ * ep0 data stage.  these chips want very simple state transitions.
+ */
+static inline
+void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
+{
+	UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
+	USIR0 = USIR0_IR0;
+	dev->req_pending = 0;
+	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
+		__FUNCTION__, tag, UDCCS0, flags);
+}
+
+static int
+write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
+{
+	unsigned	count;
+	int		is_short;
+
+	count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
+	ep->dev->stats.write.bytes += count;
+
+	/* last packet "must be" short (or a zlp) */
+	is_short = (count != EP0_FIFO_SIZE);
+
+	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
+		req->req.length - req->req.actual, req);
+
+	if (unlikely (is_short)) {
+		if (ep->dev->req_pending)
+			ep0start(ep->dev, UDCCS0_IPR, "short IN");
+		else
+			UDCCS0 = UDCCS0_IPR;
+
+		count = req->req.length;
+		done (ep, req, 0);
+		ep0_idle(ep->dev);
+#if 1
+		/* This seems to get rid of lost status irqs in some cases:
+		 * host responds quickly, or next request involves config
+		 * change automagic, or should have been hidden, or ...
+		 *
+		 * FIXME get rid of all udelays possible...
+		 */
+		if (count >= EP0_FIFO_SIZE) {
+			count = 100;
+			do {
+				if ((UDCCS0 & UDCCS0_OPR) != 0) {
+					/* clear OPR, generate ack */
+					UDCCS0 = UDCCS0_OPR;
+					break;
+				}
+				count--;
+				udelay(1);
+			} while (count);
+		}
+#endif
+	} else if (ep->dev->req_pending)
+		ep0start(ep->dev, 0, "IN");
+	return is_short;
+}
+
+
+/*
+ * read_fifo -  unload packet(s) from the fifo we use for usb OUT
+ * transfers and put them into the request.  caller should have made
+ * sure there's at least one packet ready.
+ *
+ * returns true if the request completed because of short packet or the
+ * request buffer having filled (and maybe overran till end-of-packet).
+ */
+static int
+read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
+{
+	for (;;) {
+		u32		udccs;
+		u8		*buf;
+		unsigned	bufferspace, count, is_short;
+
+		/* make sure there's a packet in the FIFO.
+		 * UDCCS_{BO,IO}_RPC are all the same bit value.
+		 * UDCCS_{BO,IO}_RNE are all the same bit value.
+		 */
+		udccs = *ep->reg_udccs;
+		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
+			break;
+		buf = req->req.buf + req->req.actual;
+		prefetchw(buf);
+		bufferspace = req->req.length - req->req.actual;
+
+		/* read all bytes from this packet */
+		if (likely (udccs & UDCCS_BO_RNE)) {
+			count = 1 + (0x0ff & *ep->reg_ubcr);
+			req->req.actual += min (count, bufferspace);
+		} else /* zlp */
+			count = 0;
+		is_short = (count < ep->ep.maxpacket);
+		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
+			ep->ep.name, udccs, count,
+			is_short ? "/S" : "",
+			req, req->req.actual, req->req.length);
+		while (likely (count-- != 0)) {
+			u8	byte = (u8) *ep->reg_uddr;
+
+			if (unlikely (bufferspace == 0)) {
+				/* this happens when the driver's buffer
+				 * is smaller than what the host sent.
+				 * discard the extra data.
+				 */
+				if (req->req.status != -EOVERFLOW)
+					DMSG("%s overflow %d\n",
+						ep->ep.name, count);
+				req->req.status = -EOVERFLOW;
+			} else {
+				*buf++ = byte;
+				bufferspace--;
+			}
+		}
+		*ep->reg_udccs =  UDCCS_BO_RPC;
+		/* RPC/RSP/RNE could now reflect the other packet buffer */
+
+		/* iso is one request per packet */
+		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+			if (udccs & UDCCS_IO_ROF)
+				req->req.status = -EHOSTUNREACH;
+			/* more like "is_done" */
+			is_short = 1;
+		}
+
+		/* completion */
+		if (is_short || req->req.actual == req->req.length) {
+			done (ep, req, 0);
+			if (list_empty(&ep->queue))
+				pio_irq_disable (ep->bEndpointAddress);
+			return 1;
+		}
+
+		/* finished that packet.  the next one may be waiting... */
+	}
+	return 0;
+}
+
+/*
+ * special ep0 version of the above.  no UBCR0 or double buffering; status
+ * handshaking is magic.  most device protocols don't need control-OUT.
+ * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
+ * protocols do use them.
+ */
+static int
+read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
+{
+	u8		*buf, byte;
+	unsigned	bufferspace;
+
+	buf = req->req.buf + req->req.actual;
+	bufferspace = req->req.length - req->req.actual;
+
+	while (UDCCS0 & UDCCS0_RNE) {
+		byte = (u8) UDDR0;
+
+		if (unlikely (bufferspace == 0)) {
+			/* this happens when the driver's buffer
+			 * is smaller than what the host sent.
+			 * discard the extra data.
+			 */
+			if (req->req.status != -EOVERFLOW)
+				DMSG("%s overflow\n", ep->ep.name);
+			req->req.status = -EOVERFLOW;
+		} else {
+			*buf++ = byte;
+			req->req.actual++;
+			bufferspace--;
+		}
+	}
+
+	UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
+
+	/* completion */
+	if (req->req.actual >= req->req.length)
+		return 1;
+
+	/* finished that packet.  the next one may be waiting... */
+	return 0;
+}
+
+#ifdef	USE_DMA
+
+#define	MAX_IN_DMA	((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
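+/* Assuming DCMD_LENGTH is the 13-bit PXA DMA length mask (0x1fff) and
+ * BULK_FIFO_SIZE is 64, this caps one no-descriptor IN transfer at
+ * 8192 - 64 = 8128 bytes, i.e. exactly 127 full bulk packets, comfortably
+ * inside the DCMD length field.
+ */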
+
+static void
+start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
+{
+	u32	dcmd = req->req.length;
+	u32	buf = req->req.dma;
+	u32	fifo = io_v2p ((u32)ep->reg_uddr);
+
+	/* caller guarantees there's a packet or more remaining
+	 *  - IN may end with a short packet (TSP set separately),
+	 *  - OUT is always full length
+	 */
+	buf += req->req.actual;
+	dcmd -= req->req.actual;
+	ep->dma_fixup = 0;
+
+	/* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
+	DCSR(ep->dma) = DCSR_NODESC;
+	if (is_in) {
+		DSADR(ep->dma) = buf;
+		DTADR(ep->dma) = fifo;
+		if (dcmd > MAX_IN_DMA)
+			dcmd = MAX_IN_DMA;
+		else
+			ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
+		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
+			| DCMD_FLOWTRG | DCMD_INCSRCADDR;
+	} else {
+#ifdef USE_OUT_DMA
+		DSADR(ep->dma) = fifo;
+		DTADR(ep->dma) = buf;
+		if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
+			dcmd = ep->ep.maxpacket;
+		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
+			| DCMD_FLOWSRC | DCMD_INCTRGADDR;
+#endif
+	}
+	DCMD(ep->dma) = dcmd;
+	DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
+		| (unlikely(is_in)
+			? DCSR_STOPIRQEN	/* use dma_nodesc_handler() */
+			: 0);			/* use handle_ep() */
+}
+
+static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
+{
+	int	is_in = ep->bEndpointAddress & USB_DIR_IN;
+
+	if (is_in) {
+		/* unaligned tx buffers and zlps only work with PIO */
+		if ((req->req.dma & 0x0f) != 0
+				|| unlikely((req->req.length - req->req.actual)
+						== 0)) {
+			pio_irq_enable(ep->bEndpointAddress);
+			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
+				(void) write_fifo(ep, req);
+		} else {
+			start_dma_nodesc(ep, req, USB_DIR_IN);
+		}
+	} else {
+		if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
+			DMSG("%s short dma read...\n", ep->ep.name);
+			/* we're always set up for pio out */
+			read_fifo (ep, req);
+		} else {
+			*ep->reg_udccs = UDCCS_BO_DME
+				| (*ep->reg_udccs & UDCCS_BO_FST);
+			start_dma_nodesc(ep, req, USB_DIR_OUT);
+		}
+	}
+}
+
+static void cancel_dma(struct pxa2xx_ep *ep)
+{
+	struct pxa2xx_request	*req;
+	u32			tmp;
+
+	if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
+		return;
+
+	DCSR(ep->dma) = 0;
+	while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
+		cpu_relax();
+
+	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
+	tmp = DCMD(ep->dma) & DCMD_LENGTH;
+	req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
+
+	/* the last tx packet may be incomplete, so flush the fifo.
+	 * FIXME correct req.actual if we can
+	 */
+	if (ep->bEndpointAddress & USB_DIR_IN)
+		*ep->reg_udccs = UDCCS_BI_FTF;
+}
+
+/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
+static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
+{
+	struct pxa2xx_ep	*ep = _ep;
+	struct pxa2xx_request	*req;
+	u32			tmp, completed;
+
+	local_irq_disable();
+
+	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
+
+	ep->dma_irqs++;
+	ep->dev->stats.irqs++;
+	HEX_DISPLAY(ep->dev->stats.irqs);
+
+	/* ack/clear */
+	tmp = DCSR(ep->dma);
+	DCSR(ep->dma) = tmp;
+	if ((tmp & DCSR_STOPSTATE) == 0
+			|| (DDADR(ep->dma) & DDADR_STOP) != 0) {
+		DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
+			ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
+		goto done;
+	}
+	DCSR(ep->dma) = 0;	/* clear DCSR_STOPSTATE */
+
+	/* update transfer status */
+	completed = tmp & DCSR_BUSERR;
+	if (ep->bEndpointAddress & USB_DIR_IN)
+		tmp = DSADR(ep->dma);
+	else
+		tmp = DTADR(ep->dma);
+	req->req.actual = tmp - req->req.dma;
+
+	/* FIXME seems we sometimes see partial transfers... */
+
+	if (unlikely(completed != 0))
+		req->req.status = -EIO;
+	else if (req->req.actual) {
+		/* these registers have zeroes in low bits; they miscount
+		 * some (end-of-transfer) short packets:  tx 14 as tx 12
+		 */
+		if (ep->dma_fixup)
+			req->req.actual = min(req->req.actual + 3,
+						req->req.length);
+
+		tmp = (req->req.length - req->req.actual);
+		completed = (tmp == 0);
+		if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
+
+			/* maybe validate final short packet ... */
+			if ((req->req.actual % ep->ep.maxpacket) != 0)
+				*ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
+
+			/* ... or zlp, using pio fallback */
+			else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
+					&& req->req.zero) {
+				DMSG("%s zlp terminate ...\n", ep->ep.name);
+				completed = 0;
+			}
+		}
+	}
+
+	if (likely(completed)) {
+		done(ep, req, 0);
+
+		/* maybe re-activate after completion */
+		if (ep->stopped || list_empty(&ep->queue))
+			goto done;
+		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
+	}
+	kick_dma(ep, req);
+done:
+	local_irq_enable();
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static int
+pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
+{
+	struct pxa2xx_request	*req;
+	struct pxa2xx_ep	*ep;
+	struct pxa2xx_udc	*dev;
+	unsigned long		flags;
+
+	req = container_of(_req, struct pxa2xx_request, req);
+	if (unlikely (!_req || !_req->complete || !_req->buf
+			|| !list_empty(&req->queue))) {
+		DMSG("%s, bad params\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	ep = container_of(_ep, struct pxa2xx_ep, ep);
+	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		DMSG("%s, bad ep\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	dev = ep->dev;
+	if (unlikely (!dev->driver
+			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+		DMSG("%s, bogus device state\n", __FUNCTION__);
+		return -ESHUTDOWN;
+	}
+
+	/* iso is always one packet per request, that's the only way
+	 * we can report per-packet status.  that also helps with dma.
+	 */
+	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+			&& req->req.length > le16_to_cpu
+						(ep->desc->wMaxPacketSize)))
+		return -EMSGSIZE;
+
+#ifdef	USE_DMA
+	// FIXME caller may already have done the dma mapping
+	if (ep->dma >= 0) {
+		_req->dma = dma_map_single(dev->dev,
+			_req->buf, _req->length,
+			((ep->bEndpointAddress & USB_DIR_IN) != 0)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+	}
+#endif
+
+	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
+	     _ep->name, _req, _req->length, _req->buf);
+
+	local_irq_save(flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* kickstart this i/o queue? */
+	if (list_empty(&ep->queue) && !ep->stopped) {
+		if (ep->desc == 0 /* ep0 */) {
+			unsigned	length = _req->length;
+
+			switch (dev->ep0state) {
+			case EP0_IN_DATA_PHASE:
+				dev->stats.write.ops++;
+				if (write_ep0_fifo(ep, req))
+					req = NULL;
+				break;
+
+			case EP0_OUT_DATA_PHASE:
+				dev->stats.read.ops++;
+				/* messy ... */
+				if (dev->req_config) {
+					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
+						dev->has_cfr ?  "" : " raced");
+					if (dev->has_cfr)
+						UDCCFR = UDCCFR_AREN|UDCCFR_ACM
+							|UDCCFR_MB1;
+					done(ep, req, 0);
+					dev->ep0state = EP0_END_XFER;
+					local_irq_restore (flags);
+					return 0;
+				}
+				if (dev->req_pending)
+					ep0start(dev, UDCCS0_IPR, "OUT");
+				if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
+						&& read_ep0_fifo(ep, req))) {
+					ep0_idle(dev);
+					done(ep, req, 0);
+					req = NULL;
+				}
+				break;
+
+			default:
+				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
+				local_irq_restore (flags);
+				return -EL2HLT;
+			}
+#ifdef	USE_DMA
+		/* either start dma or prime pio pump */
+		} else if (ep->dma >= 0) {
+			kick_dma(ep, req);
+#endif
+		/* can the FIFO satisfy the request immediately? */
+		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+				&& (*ep->reg_udccs & UDCCS_BI_TFS) != 0
+				&& write_fifo(ep, req)) {
+			req = NULL;
+		} else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
+				&& read_fifo(ep, req)) {
+			req = NULL;
+		}
+
+		if (likely (req && ep->desc) && ep->dma < 0)
+			pio_irq_enable(ep->bEndpointAddress);
+	}
+
+	/* pio or dma irq handler advances the queue. */
+	if (likely (req != 0))
+		list_add_tail(&req->queue, &ep->queue);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+
+/*
+ * 	nuke - dequeue ALL requests
+ */
+static void nuke(struct pxa2xx_ep *ep, int status)
+{
+	struct pxa2xx_request *req;
+
+	/* called with irqs blocked */
+#ifdef	USE_DMA
+	if (ep->dma >= 0 && !ep->stopped)
+		cancel_dma(ep);
+#endif
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next,
+				struct pxa2xx_request,
+				queue);
+		done(ep, req, status);
+	}
+	if (ep->desc)
+		pio_irq_disable (ep->bEndpointAddress);
+}
+
+
+/* dequeue JUST ONE request */
+static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct pxa2xx_ep	*ep;
+	struct pxa2xx_request	*req;
+	unsigned long		flags;
+
+	ep = container_of(_ep, struct pxa2xx_ep, ep);
+	if (!_ep || ep->ep.name == ep0name)
+		return -EINVAL;
+
+	local_irq_save(flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		local_irq_restore(flags);
+		return -EINVAL;
+	}
+
+#ifdef	USE_DMA
+	if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
+		cancel_dma(ep);
+		done(ep, req, -ECONNRESET);
+		/* restart i/o */
+		if (!list_empty(&ep->queue)) {
+			req = list_entry(ep->queue.next,
+					struct pxa2xx_request, queue);
+			kick_dma(ep, req);
+		}
+	} else
+#endif
+		done(ep, req, -ECONNRESET);
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct pxa2xx_ep	*ep;
+	unsigned long		flags;
+
+	ep = container_of(_ep, struct pxa2xx_ep, ep);
+	if (unlikely (!_ep
+			|| (!ep->desc && ep->ep.name != ep0name))
+			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		DMSG("%s, bad ep\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	if (value == 0) {
+		/* this path (reset toggle+halt) is needed to implement
+		 * SET_INTERFACE on normal hardware.  but it can't be
+		 * done from software on the PXA UDC, and the hardware
+		 * forgets to do it as part of SET_INTERFACE automagic.
+		 */
+		DMSG("only host can clear %s halt\n", _ep->name);
+		return -EROFS;
+	}
+
+	local_irq_save(flags);
+
+	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+			&& ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
+			   || !list_empty(&ep->queue))) {
+		local_irq_restore(flags);
+		return -EAGAIN;
+	}
+
+	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
+	*ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
+
+	/* ep0 needs special care */
+	if (!ep->desc) {
+		start_watchdog(ep->dev);
+		ep->dev->req_pending = 0;
+		ep->dev->ep0state = EP0_STALL;
+
+	/* and bulk/intr endpoints like dropping stalls too */
+	} else {
+		unsigned i;
+		for (i = 0; i < 1000; i += 20) {
+			if (*ep->reg_udccs & UDCCS_BI_SST)
+				break;
+			udelay(20);
+		}
+	}
+	local_irq_restore(flags);
+
+	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
+	return 0;
+}
+
+static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
+{
+	struct pxa2xx_ep        *ep;
+
+	ep = container_of(_ep, struct pxa2xx_ep, ep);
+	if (!_ep) {
+		DMSG("%s, bad ep\n", __FUNCTION__);
+		return -ENODEV;
+	}
+	/* pxa can't report unclaimed bytes from IN fifos */
+	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
+		return -EOPNOTSUPP;
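+	/* the "+ 1" below reflects the driver's reading of UBCR as holding
+	 * one less than the byte count in a non-empty OUT fifo.
+	 */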
+	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
+			|| (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
+		return 0;
+	else
+		return (*ep->reg_ubcr & 0xfff) + 1;
+}
+
+static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct pxa2xx_ep        *ep;
+
+	ep = container_of(_ep, struct pxa2xx_ep, ep);
+	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
+		DMSG("%s, bad ep\n", __FUNCTION__);
+		return;
+	}
+
+	/* toggle and halt bits stay unchanged */
+
+	/* for OUT, just read and discard the FIFO contents. */
+	if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
+		while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
+			(void) *ep->reg_uddr;
+		return;
+	}
+
+	/* most IN status is the same, but ISO can't stall */
+	*ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
+		| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+			? 0 : UDCCS_BI_SST);
+}
+
+
+static struct usb_ep_ops pxa2xx_ep_ops = {
+	.enable		= pxa2xx_ep_enable,
+	.disable	= pxa2xx_ep_disable,
+
+	.alloc_request	= pxa2xx_ep_alloc_request,
+	.free_request	= pxa2xx_ep_free_request,
+
+	.alloc_buffer	= pxa2xx_ep_alloc_buffer,
+	.free_buffer	= pxa2xx_ep_free_buffer,
+
+	.queue		= pxa2xx_ep_queue,
+	.dequeue	= pxa2xx_ep_dequeue,
+
+	.set_halt	= pxa2xx_ep_set_halt,
+	.fifo_status	= pxa2xx_ep_fifo_status,
+	.fifo_flush	= pxa2xx_ep_fifo_flush,
+};
+
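+/* Illustrative sketch only (not part of this driver): gadget drivers reach
+ * the ops above through the usb_ep_*() wrappers of the gadget API, roughly
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *	req->buf = my_buf;		(my_buf, my_len and my_done are
+ *	req->length = my_len;		 placeholders, not defined here)
+ *	req->complete = my_done;
+ *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ *
+ * which lands in pxa2xx_ep_queue() above.
+ */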
+
+/* ---------------------------------------------------------------------------
+ * 	device-scoped parts of the api to the usb controller hardware
+ * ---------------------------------------------------------------------------
+ */
+
+static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
+{
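+	/* the 11-bit USB frame number is split across UFNRH[2:0] and UFNRL */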
+	return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
+}
+
+static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
+{
+	/* host may not have enabled remote wakeup */
+	if ((UDCCS0 & UDCCS0_DRWF) == 0)
+		return -EHOSTUNREACH;
+	udc_set_mask_UDCCR(UDCCR_RSM);
+	return 0;
+}
+
+static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
+static void udc_enable (struct pxa2xx_udc *);
+static void udc_disable(struct pxa2xx_udc *);
+
+/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
+ * in active use.  
+ */
+static int pullup(struct pxa2xx_udc *udc, int is_active)
+{
+	is_active = is_active && udc->vbus && udc->pullup;
+	DMSG("%s\n", is_active ? "active" : "inactive");
+	if (is_active)
+		udc_enable(udc);
+	else {
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+			DMSG("disconnect %s\n", udc->driver
+				? udc->driver->driver.name
+				: "(no driver)");
+			stop_activity(udc, udc->driver);
+		}
+		udc_disable(udc);
+	}
+	return 0;
+}
+
+/* VBUS reporting logically comes from a transceiver */
+static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct pxa2xx_udc	*udc;
+
+	udc = container_of(_gadget, struct pxa2xx_udc, gadget);
+	udc->vbus = is_active = (is_active != 0);
+	DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
+	pullup(udc, is_active);
+	return 0;
+}
+
+/* drivers may have software control over D+ pullup */
+static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct pxa2xx_udc	*udc;
+
+	udc = container_of(_gadget, struct pxa2xx_udc, gadget);
+
+	/* not all boards support pullup control */
+	if (!udc->mach->udc_command)
+		return -EOPNOTSUPP;
+
+	is_active = (is_active != 0);
+	udc->pullup = is_active;
+	pullup(udc, is_active);
+	return 0;
+}
+
+static const struct usb_gadget_ops pxa2xx_udc_ops = {
+	.get_frame	= pxa2xx_udc_get_frame,
+	.wakeup		= pxa2xx_udc_wakeup,
+	.vbus_session	= pxa2xx_udc_vbus_session,
+	.pullup		= pxa2xx_udc_pullup,
+
+	// .vbus_draw ... boards may consume current from VBUS, up to
+	// 100-500mA based on config.  the 500uA suspend ceiling means
+	// that exclusively vbus-powered PXA designs violate USB specs.
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static const char proc_node_name [] = "driver/udc";
+
+static int
+udc_proc_read(char *page, char **start, off_t off, int count,
+		int *eof, void *_dev)
+{
+	char			*buf = page;
+	struct pxa2xx_udc	*dev = _dev;
+	char			*next = buf;
+	unsigned		size = count;
+	unsigned long		flags;
+	int			i, t;
+	u32			tmp;
+
+	if (off != 0)
+		return 0;
+
+	local_irq_save(flags);
+
+	/* basic device status */
+	t = scnprintf(next, size, DRIVER_DESC "\n"
+		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
+		driver_name, DRIVER_VERSION SIZE_STR DMASTR,
+		dev->driver ? dev->driver->driver.name : "(none)",
+		is_usb_connected() ? "full speed" : "disconnected");
+	size -= t;
+	next += t;
+
+	/* registers for device and ep0 */
+	t = scnprintf(next, size,
+		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
+		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
+	size -= t;
+	next += t;
+
+	tmp = UDCCR;
+	t = scnprintf(next, size,
+		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
+		(tmp & UDCCR_REM) ? " rem" : "",
+		(tmp & UDCCR_RSTIR) ? " rstir" : "",
+		(tmp & UDCCR_SRM) ? " srm" : "",
+		(tmp & UDCCR_SUSIR) ? " susir" : "",
+		(tmp & UDCCR_RESIR) ? " resir" : "",
+		(tmp & UDCCR_RSM) ? " rsm" : "",
+		(tmp & UDCCR_UDA) ? " uda" : "",
+		(tmp & UDCCR_UDE) ? " ude" : "");
+	size -= t;
+	next += t;
+
+	tmp = UDCCS0;
+	t = scnprintf(next, size,
+		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
+		(tmp & UDCCS0_SA) ? " sa" : "",
+		(tmp & UDCCS0_RNE) ? " rne" : "",
+		(tmp & UDCCS0_FST) ? " fst" : "",
+		(tmp & UDCCS0_SST) ? " sst" : "",
+		(tmp & UDCCS0_DRWF) ? " drwf" : "",
+		(tmp & UDCCS0_FTF) ? " ftf" : "",
+		(tmp & UDCCS0_IPR) ? " ipr" : "",
+		(tmp & UDCCS0_OPR) ? " opr" : "");
+	size -= t;
+	next += t;
+
+	if (dev->has_cfr) {
+		tmp = UDCCFR;
+		t = scnprintf(next, size,
+			"udccfr %02X =%s%s\n", tmp,
+			(tmp & UDCCFR_AREN) ? " aren" : "",
+			(tmp & UDCCFR_ACM) ? " acm" : "");
+		size -= t;
+		next += t;
+	}
+
+	if (!is_usb_connected() || !dev->driver)
+		goto done;
+
+	t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
+		dev->stats.write.bytes, dev->stats.write.ops,
+		dev->stats.read.bytes, dev->stats.read.ops,
+		dev->stats.irqs);
+	size -= t;
+	next += t;
+
+	/* dump endpoint queues */
+	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		struct pxa2xx_ep	*ep = &dev->ep [i];
+		struct pxa2xx_request	*req;
+		int			t;
+
+		if (i != 0) {
+			const struct usb_endpoint_descriptor	*d;
+
+			d = ep->desc;
+			if (!d)
+				continue;
+			tmp = *dev->ep [i].reg_udccs;
+			t = scnprintf(next, size,
+				"%s max %d %s udccs %02x irqs %lu/%lu\n",
+				ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
+				(ep->dma >= 0) ? "dma" : "pio", tmp,
+				ep->pio_irqs, ep->dma_irqs);
+			/* TODO translate all five groups of udccs bits! */
+
+		} else /* ep0 should only have one transfer queued */
+			t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
+				ep->pio_irqs);
+		if (t <= 0 || t > size)
+			goto done;
+		size -= t;
+		next += t;
+
+		if (list_empty(&ep->queue)) {
+			t = scnprintf(next, size, "\t(nothing queued)\n");
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+			continue;
+		}
+		list_for_each_entry(req, &ep->queue, queue) {
+#ifdef	USE_DMA
+			if (ep->dma >= 0 && req->queue.prev == &ep->queue)
+				t = scnprintf(next, size,
+					"\treq %p len %d/%d "
+					"buf %p (dma%d dcmd %08x)\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf,
+					ep->dma, DCMD(ep->dma)
+					// low 13 bits == bytes-to-go
+					);
+			else
+#endif
+				t = scnprintf(next, size,
+					"\treq %p len %d/%d buf %p\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf);
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+		}
+	}
+
+done:
+	local_irq_restore(flags);
+	*eof = 1;
+	return count - size;
+}
+
+#define create_proc_files() \
+	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
+#define remove_proc_files() \
+	remove_proc_entry(proc_node_name, NULL)
+
+#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */
+
+#define create_proc_files() do {} while (0)
+#define remove_proc_files() do {} while (0)
+
+#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/* "function" sysfs attribute */
+static ssize_t
+show_function (struct device *_dev, char *buf)
+{
+	struct pxa2xx_udc	*dev = dev_get_drvdata (_dev);
+
+	if (!dev->driver
+			|| !dev->driver->function
+			|| strlen (dev->driver->function) > PAGE_SIZE)
+		return 0;
+	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * 	udc_disable - disable USB device controller
+ */
+static void udc_disable(struct pxa2xx_udc *dev)
+{
+	/* block all irqs */
+	udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
+	UICR0 = UICR1 = 0xff;
+	UFNRH = UFNRH_SIM;
+
+	/* if hardware supports it, disconnect from usb */
+	make_usb_disappear();
+
+	udc_clear_mask_UDCCR(UDCCR_UDE);
+
+#ifdef	CONFIG_ARCH_PXA
+        /* Disable clock for USB device */
+	pxa_set_cken(CKEN11_USB, 0);
+#endif
+
+	ep0_idle (dev);
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	LED_CONNECTED_OFF;
+}
+
+
+/*
+ * 	udc_reinit - initialize software state
+ */
+static void udc_reinit(struct pxa2xx_udc *dev)
+{
+	u32	i;
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+	dev->ep0state = EP0_IDLE;
+
+	/* basic endpoint records init */
+	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		struct pxa2xx_ep *ep = &dev->ep[i];
+
+		if (i != 0)
+			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+
+		ep->desc = NULL;
+		ep->stopped = 0;
+		INIT_LIST_HEAD (&ep->queue);
+		ep->pio_irqs = ep->dma_irqs = 0;
+	}
+
+	/* the rest was statically initialized, and is read-only */
+}
+
+/* until it's enabled, this UDC should be completely invisible
+ * to any USB host.
+ */
+static void udc_enable (struct pxa2xx_udc *dev)
+{
+	udc_clear_mask_UDCCR(UDCCR_UDE);
+
+#ifdef	CONFIG_ARCH_PXA
+        /* Enable clock for USB device */
+	pxa_set_cken(CKEN11_USB, 1);
+	udelay(5);
+#endif
+
+	/* try to clear these bits before we enable the udc */
+	udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
+
+	ep0_idle(dev);
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	dev->stats.irqs = 0;
+
+	/*
+	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
+	 * - enable UDC
+	 * - if RESET is already in progress, ack interrupt
+	 * - unmask reset interrupt
+	 */
+	udc_set_mask_UDCCR(UDCCR_UDE);
+	if (!(UDCCR & UDCCR_UDA))
+		udc_ack_int_UDCCR(UDCCR_RSTIR);
+
+	if (dev->has_cfr /* UDC_RES2 is defined */) {
+		/* pxa255 (a0+) can avoid a set_config race that could
+		 * prevent gadget drivers from configuring correctly
+		 */
+		UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
+	} else {
+		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
+		 * which could result in missing packets and interrupts.
+		 * supposedly one bit per endpoint, controlling whether it
+		 * double buffers or not; ACM/AREN bits fit into the holes.
+		 * zero bits (like USIR0_IRx) disable double buffering.
+		 */
+		UDC_RES1 = 0x00;
+		UDC_RES2 = 0x00;
+	}
+
+#ifdef	DISABLE_TEST_MODE
+	/* "test mode" seems to have become the default in later chip
+	 * revs, preventing double buffering (and invalidating docs).
+	 * this EXPERIMENT enables it for bulk endpoints by tweaking
+	 * undefined/reserved register bits (that other drivers clear).
+	 * Belcarra code comments noted this usage.
+	 */
+	if (fifo_mode & 1) {	/* IN endpoints */
+		UDC_RES1 |= USIR0_IR1|USIR0_IR6;
+		UDC_RES2 |= USIR1_IR11;
+	}
+	if (fifo_mode & 2) {	/* OUT endpoints */
+		UDC_RES1 |= USIR0_IR2|USIR0_IR7;
+		UDC_RES2 |= USIR1_IR12;
+	}
+#endif
+
+	/* enable suspend/resume and reset irqs */
+	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
+
+	/* enable ep0 irqs */
+	UICR0 &= ~UICR0_IM0;
+
+	/* if hardware supports it, pullup D+ and wait for reset */
+	let_usb_appear();
+}
+
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct pxa2xx_udc	*dev = the_controller;
+	int			retval;
+
+	if (!driver
+			|| driver->speed != USB_SPEED_FULL
+			|| !driver->bind
+			|| !driver->unbind
+			|| !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+	dev->pullup = 1;
+
+	device_add (&dev->gadget.dev);
+	retval = driver->bind(&dev->gadget);
+	if (retval) {
+		DMSG("bind to driver %s --> error %d\n",
+				driver->driver.name, retval);
+		device_del (&dev->gadget.dev);
+
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+	device_create_file(dev->dev, &dev_attr_function);
+
+	/* ... then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	DMSG("registered gadget driver '%s'\n", driver->driver.name);
+	pullup(dev, 1);
+	dump_state(dev);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
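+
+/* Sketch for reference (all "my_*" names are hypothetical): a full speed
+ * gadget driver binds to this controller roughly as follows
+ *
+ *	static struct usb_gadget_driver my_gadget = {
+ *		.speed		= USB_SPEED_FULL,
+ *		.bind		= my_bind,
+ *		.unbind		= my_unbind,
+ *		.setup		= my_setup,
+ *		.disconnect	= my_disconnect,
+ *		.driver		= { .name = "my_gadget" },
+ *	};
+ *	status = usb_gadget_register_driver(&my_gadget);
+ */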
+
+static void
+stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
+{
+	int i;
+
+	/* don't disconnect drivers more than once */
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		struct pxa2xx_ep *ep = &dev->ep[i];
+
+		ep->stopped = 1;
+		nuke(ep, -ESHUTDOWN);
+	}
+	del_timer_sync(&dev->timer);
+
+	/* report disconnect; the driver is already quiesced */
+	LED_CONNECTED_OFF;
+	if (driver)
+		driver->disconnect(&dev->gadget);
+
+	/* re-init driver-visible data structures */
+	udc_reinit(dev);
+}
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct pxa2xx_udc	*dev = the_controller;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver)
+		return -EINVAL;
+
+	local_irq_disable();
+	pullup(dev, 0);
+	stop_activity(dev, driver);
+	local_irq_enable();
+
+	driver->unbind(&dev->gadget);
+	dev->driver = NULL;
+
+	device_del (&dev->gadget.dev);
+	device_remove_file(dev->dev, &dev_attr_function);
+
+	DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
+	dump_state(dev);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_LUBBOCK
+
+/* Lubbock has separate connect and disconnect irqs.  More typical designs
+ * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
+ */
+
+static irqreturn_t
+lubbock_vbus_irq(int irq, void *_dev, struct pt_regs *r)
+{
+	struct pxa2xx_udc	*dev = _dev;
+	int			vbus;
+
+	dev->stats.irqs++;
+	HEX_DISPLAY(dev->stats.irqs);
+	switch (irq) {
+	case LUBBOCK_USB_IRQ:
+		LED_CONNECTED_ON;
+		vbus = 1;
+		disable_irq(LUBBOCK_USB_IRQ);
+		enable_irq(LUBBOCK_USB_DISC_IRQ);
+		break;
+	case LUBBOCK_USB_DISC_IRQ:
+		LED_CONNECTED_OFF;
+		vbus = 0;
+		disable_irq(LUBBOCK_USB_DISC_IRQ);
+		enable_irq(LUBBOCK_USB_IRQ);
+		break;
+	default:
+		return IRQ_NONE;
+	}
+
+	pxa2xx_udc_vbus_session(&dev->gadget, vbus);
+	return IRQ_HANDLED;
+}
+
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline void clear_ep_state (struct pxa2xx_udc *dev)
+{
+	unsigned i;
+
+	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
+	 * fifos, and pending transactions mustn't be continued in any case.
+	 */
+	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
+		nuke(&dev->ep[i], -ECONNABORTED);
+}
+
+static void udc_watchdog(unsigned long _dev)
+{
+	struct pxa2xx_udc	*dev = (void *)_dev;
+
+	local_irq_disable();
+	if (dev->ep0state == EP0_STALL
+			&& (UDCCS0 & UDCCS0_FST) == 0
+			&& (UDCCS0 & UDCCS0_SST) == 0) {
+		UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
+		DBG(DBG_VERBOSE, "ep0 re-stall\n");
+		start_watchdog(dev);
+	}
+	local_irq_enable();
+}
+
+static void handle_ep0 (struct pxa2xx_udc *dev)
+{
+	u32			udccs0 = UDCCS0;
+	struct pxa2xx_ep	*ep = &dev->ep [0];
+	struct pxa2xx_request	*req;
+	union {
+		struct usb_ctrlrequest	r;
+		u8			raw [8];
+		u32			word [2];
+	} u;
+
+	if (list_empty(&ep->queue))
+		req = NULL;
+	else
+		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
+
+	/* clear stall status */
+	if (udccs0 & UDCCS0_SST) {
+		nuke(ep, -EPIPE);
+		UDCCS0 = UDCCS0_SST;
+		del_timer(&dev->timer);
+		ep0_idle(dev);
+	}
+
+	/* previous request unfinished?  non-error iff back-to-back ... */
+	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
+		nuke(ep, 0);
+		del_timer(&dev->timer);
+		ep0_idle(dev);
+	}
+
+	switch (dev->ep0state) {
+	case EP0_IDLE:
+		/* late-breaking status? */
+		udccs0 = UDCCS0;
+
+		/* start control request? */
+		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
+				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
+			int i;
+
+			nuke (ep, -EPROTO);
+
+			/* read SETUP packet */
+			for (i = 0; i < 8; i++) {
+				if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
+bad_setup:
+					DMSG("SETUP %d!\n", i);
+					goto stall;
+				}
+				u.raw [i] = (u8) UDDR0;
+			}
+			if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
+				goto bad_setup;
+
+got_setup:
+			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+				u.r.bRequestType, u.r.bRequest,
+				le16_to_cpu(u.r.wValue),
+				le16_to_cpu(u.r.wIndex),
+				le16_to_cpu(u.r.wLength));
+
+			/* cope with automagic for some standard requests. */
+			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
+						== USB_TYPE_STANDARD;
+			dev->req_config = 0;
+			dev->req_pending = 1;
+			switch (u.r.bRequest) {
+			/* hardware restricts gadget drivers here! */
+			case USB_REQ_SET_CONFIGURATION:
+				if (u.r.bRequestType == USB_RECIP_DEVICE) {
+					/* reflect hardware's automagic
+					 * up to the gadget driver.
+					 */
+config_change:
+					dev->req_config = 1;
+					clear_ep_state(dev);
+					/* if !has_cfr, there's no synch
+					 * else use AREN (later) not SA|OPR
+					 * USIR0_IR0 acts edge sensitive
+					 */
+				}
+				break;
+			/* ... and here, even more ... */
+			case USB_REQ_SET_INTERFACE:
+				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
+					/* udc hardware is broken by design:
+					 *  - altsetting may only be zero;
+					 *  - hw resets all interfaces' eps;
+					 *  - ep reset doesn't include halt(?).
+					 */
+					DMSG("broken set_interface (%d/%d)\n",
+						le16_to_cpu(u.r.wIndex),
+						le16_to_cpu(u.r.wValue));
+					goto config_change;
+				}
+				break;
+			/* hardware was supposed to hide this */
+			case USB_REQ_SET_ADDRESS:
+				if (u.r.bRequestType == USB_RECIP_DEVICE) {
+					ep0start(dev, 0, "address");
+					return;
+				}
+				break;
+			}
+
+			if (u.r.bRequestType & USB_DIR_IN)
+				dev->ep0state = EP0_IN_DATA_PHASE;
+			else
+				dev->ep0state = EP0_OUT_DATA_PHASE;
+
+			i = dev->driver->setup(&dev->gadget, &u.r);
+			if (i < 0) {
+				/* hardware automagic preventing STALL... */
+				if (dev->req_config) {
+					/* hardware sometimes neglects to
+					 * tell us about config change events,
+					 * so later ones may fail...
+					 */
+					WARN("config change %02x fail %d?\n",
+						u.r.bRequest, i);
+					return;
+					/* TODO experiment:  if has_cfr,
+					 * hardware didn't ACK; maybe we
+					 * could actually STALL!
+					 */
+				}
+				DBG(DBG_VERBOSE, "protocol STALL, "
+					"%02x err %d\n", UDCCS0, i);
+stall:
+				/* the watchdog timer helps deal with cases
+				 * where udc seems to clear FST wrongly, and
+				 * then NAKs instead of STALLing.
+				 */
+				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
+				start_watchdog(dev);
+				dev->ep0state = EP0_STALL;
+
+			/* deferred i/o == no response yet */
+			} else if (dev->req_pending) {
+				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
+						|| dev->req_std || u.r.wLength))
+					ep0start(dev, 0, "defer");
+				else
+					ep0start(dev, UDCCS0_IPR, "defer/IPR");
+			}
+
+			/* expect at least one data or status stage irq */
+			return;
+
+		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
+				== (UDCCS0_OPR|UDCCS0_SA))) {
+			unsigned i;
+
+			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
+			 * still observed on a pxa255 a0.
+			 */
+			DBG(DBG_VERBOSE, "e131\n");
+			nuke(ep, -EPROTO);
+
+			/* read SETUP data, but don't trust it too much */
+			for (i = 0; i < 8; i++)
+				u.raw [i] = (u8) UDDR0;
+			if ((u.r.bRequestType & USB_RECIP_MASK)
+					> USB_RECIP_OTHER)
+				goto stall;
+			if (u.word [0] == 0 && u.word [1] == 0)
+				goto stall;
+			goto got_setup;
+		} else {
+			/* some random early IRQ:
+			 * - we acked FST
+			 * - IPR cleared
+			 * - OPR got set, without SA (likely status stage)
+			 */
+			UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
+		}
+		break;
+	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
+		if (udccs0 & UDCCS0_OPR) {
+			UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
+			DBG(DBG_VERBOSE, "ep0in premature status\n");
+			if (req)
+				done(ep, req, 0);
+			ep0_idle(dev);
+		} else /* irq was IPR clearing */ {
+			if (req) {
+				/* this IN packet might finish the request */
+				(void) write_ep0_fifo(ep, req);
+			} /* else IN token before response was written */
+		}
+		break;
+	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
+		if (udccs0 & UDCCS0_OPR) {
+			if (req) {
+				/* this OUT packet might finish the request */
+				if (read_ep0_fifo(ep, req))
+					done(ep, req, 0);
+				/* else more OUT packets expected */
+			} /* else OUT token before read was issued */
+		} else /* irq was IPR clearing */ {
+			DBG(DBG_VERBOSE, "ep0out premature status\n");
+			if (req)
+				done(ep, req, 0);
+			ep0_idle(dev);
+		}
+		break;
+	case EP0_END_XFER:
+		if (req)
+			done(ep, req, 0);
+		/* ack control-IN status (maybe in-zlp was skipped)
+		 * also appears after some config change events.
+		 */
+		if (udccs0 & UDCCS0_OPR)
+			UDCCS0 = UDCCS0_OPR;
+		ep0_idle(dev);
+		break;
+	case EP0_STALL:
+		UDCCS0 = UDCCS0_FST;
+		break;
+	}
+	USIR0 = USIR0_IR0;
+}
+
+static void handle_ep(struct pxa2xx_ep *ep)
+{
+	struct pxa2xx_request	*req;
+	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
+	int			completed;
+	u32			udccs, tmp;
+
+	do {
+		completed = 0;
+		if (likely (!list_empty(&ep->queue)))
+			req = list_entry(ep->queue.next,
+					struct pxa2xx_request, queue);
+		else
+			req = NULL;
+
+		// TODO check FST handling
+
+		udccs = *ep->reg_udccs;
+		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
+			tmp = UDCCS_BI_TUR;
+			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
+				tmp |= UDCCS_BI_SST;
+			tmp &= udccs;
+			if (likely (tmp))
+				*ep->reg_udccs = tmp;
+			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
+				completed = write_fifo(ep, req);
+
+		} else {	/* irq from RPC (or for ISO, ROF) */
+			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
+				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
+			else
+				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
+			tmp &= udccs;
+			if (likely(tmp))
+				*ep->reg_udccs = tmp;
+
+			/* fifos can hold packets, ready for reading... */
+			if (likely(req)) {
+#ifdef USE_OUT_DMA
+// TODO out-dma hasn't been debugged yet.  this approach assumes
+// the worst about short packets and RPC; there may be a better way.
+
+				if (likely(ep->dma >= 0)) {
+					if (!(udccs & UDCCS_BO_RSP)) {
+						*ep->reg_udccs = UDCCS_BO_RPC;
+						ep->dma_irqs++;
+						return;
+					}
+				}
+#endif
+				completed = read_fifo(ep, req);
+			} else
+				pio_irq_disable (ep->bEndpointAddress);
+		}
+		ep->pio_irqs++;
+	} while (completed);
+}
+
+/*
+ *	pxa2xx_udc_irq - interrupt handler
+ *
+ * avoid delays in ep0 processing. the control handshaking isn't always
+ * under software control (pxa250c0 and the pxa255 are better), and delays
+ * could cause usb protocol errors.
+ */
+static irqreturn_t
+pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
+{
+	struct pxa2xx_udc	*dev = _dev;
+	int			handled;
+
+	dev->stats.irqs++;
+	HEX_DISPLAY(dev->stats.irqs);
+	do {
+		u32		udccr = UDCCR;
+
+		handled = 0;
+
+		/* SUSpend Interrupt Request */
+		if (unlikely(udccr & UDCCR_SUSIR)) {
+			udc_ack_int_UDCCR(UDCCR_SUSIR);
+			handled = 1;
+			DBG(DBG_VERBOSE, "USB suspend%s\n", is_usb_connected()
+				? "" : "+disconnect");
+
+			if (!is_usb_connected())
+				stop_activity(dev, dev->driver);
+			else if (dev->gadget.speed != USB_SPEED_UNKNOWN
+					&& dev->driver
+					&& dev->driver->suspend)
+				dev->driver->suspend(&dev->gadget);
+			ep0_idle (dev);
+		}
+
+		/* RESume Interrupt Request */
+		if (unlikely(udccr & UDCCR_RESIR)) {
+			udc_ack_int_UDCCR(UDCCR_RESIR);
+			handled = 1;
+			DBG(DBG_VERBOSE, "USB resume\n");
+
+			if (dev->gadget.speed != USB_SPEED_UNKNOWN
+					&& dev->driver
+					&& dev->driver->resume
+					&& is_usb_connected())
+				dev->driver->resume(&dev->gadget);
+		}
+
+		/* ReSeT Interrupt Request - USB reset */
+		if (unlikely(udccr & UDCCR_RSTIR)) {
+			udc_ack_int_UDCCR(UDCCR_RSTIR);
+			handled = 1;
+
+			if ((UDCCR & UDCCR_UDA) == 0) {
+				DBG(DBG_VERBOSE, "USB reset start\n");
+
+				/* reset driver and endpoints,
+				 * in case that's not yet done
+				 */
+				stop_activity (dev, dev->driver);
+
+			} else {
+				DBG(DBG_VERBOSE, "USB reset end\n");
+				dev->gadget.speed = USB_SPEED_FULL;
+				LED_CONNECTED_ON;
+				memset(&dev->stats, 0, sizeof dev->stats);
+				/* driver and endpoints are still reset */
+			}
+
+		} else {
+			u32	usir0 = USIR0 & ~UICR0;
+			u32	usir1 = USIR1 & ~UICR1;
+			int	i;
+
+			if (unlikely (!usir0 && !usir1))
+				continue;
+
+			DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
+
+			/* control traffic */
+			if (usir0 & USIR0_IR0) {
+				dev->ep[0].pio_irqs++;
+				handle_ep0(dev);
+				handled = 1;
+			}
+
+			/* endpoint data transfers */
+			for (i = 0; i < 8; i++) {
+				u32	tmp = 1 << i;
+
+				if (i && (usir0 & tmp)) {
+					handle_ep(&dev->ep[i]);
+					USIR0 |= tmp;
+					handled = 1;
+				}
+				if (usir1 & tmp) {
+					handle_ep(&dev->ep[i+8]);
+					USIR1 |= tmp;
+					handled = 1;
+				}
+			}
+		}
+
+		/* we could also ask for 1 msec SOF (SIR) interrupts */
+
+	} while (handled);
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void nop_release (struct device *dev)
+{
+	DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
+}
+
+/* this uses load-time allocation and initialization (instead of
+ * doing it at run-time) to save code, eliminate fault paths, and
+ * be more obviously correct.
+ */
+static struct pxa2xx_udc memory = {
+	.gadget = {
+		.ops		= &pxa2xx_udc_ops,
+		.ep0		= &memory.ep[0].ep,
+		.name		= driver_name,
+		.dev = {
+			.bus_id		= "gadget",
+			.release	= nop_release,
+		},
+	},
+
+	/* control endpoint */
+	.ep[0] = {
+		.ep = {
+			.name		= ep0name,
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= EP0_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.reg_udccs	= &UDCCS0,
+		.reg_uddr	= &UDDR0,
+	},
+
+	/* first group of endpoints */
+	.ep[1] = {
+		.ep = {
+			.name		= "ep1in-bulk",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 1,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS1,
+		.reg_uddr	= &UDDR1,
+		drcmr (25)
+	},
+	.ep[2] = {
+		.ep = {
+			.name		= "ep2out-bulk",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = 2,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS2,
+		.reg_ubcr	= &UBCR2,
+		.reg_uddr	= &UDDR2,
+		drcmr (26)
+	},
+#ifndef CONFIG_USB_PXA2XX_SMALL
+	.ep[3] = {
+		.ep = {
+			.name		= "ep3in-iso",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 3,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS3,
+		.reg_uddr	= &UDDR3,
+		drcmr (27)
+	},
+	.ep[4] = {
+		.ep = {
+			.name		= "ep4out-iso",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = 4,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS4,
+		.reg_ubcr	= &UBCR4,
+		.reg_uddr	= &UDDR4,
+		drcmr (28)
+	},
+	.ep[5] = {
+		.ep = {
+			.name		= "ep5in-int",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= INT_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= INT_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 5,
+		.bmAttributes	= USB_ENDPOINT_XFER_INT,
+		.reg_udccs	= &UDCCS5,
+		.reg_uddr	= &UDDR5,
+	},
+
+	/* second group of endpoints */
+	.ep[6] = {
+		.ep = {
+			.name		= "ep6in-bulk",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 6,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS6,
+		.reg_uddr	= &UDDR6,
+		drcmr (30)
+	},
+	.ep[7] = {
+		.ep = {
+			.name		= "ep7out-bulk",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = 7,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS7,
+		.reg_ubcr	= &UBCR7,
+		.reg_uddr	= &UDDR7,
+		drcmr (31)
+	},
+	.ep[8] = {
+		.ep = {
+			.name		= "ep8in-iso",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 8,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS8,
+		.reg_uddr	= &UDDR8,
+		drcmr (32)
+	},
+	.ep[9] = {
+		.ep = {
+			.name		= "ep9out-iso",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = 9,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS9,
+		.reg_ubcr	= &UBCR9,
+		.reg_uddr	= &UDDR9,
+		drcmr (33)
+	},
+	.ep[10] = {
+		.ep = {
+			.name		= "ep10in-int",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= INT_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= INT_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 10,
+		.bmAttributes	= USB_ENDPOINT_XFER_INT,
+		.reg_udccs	= &UDCCS10,
+		.reg_uddr	= &UDDR10,
+	},
+
+	/* third group of endpoints */
+	.ep[11] = {
+		.ep = {
+			.name		= "ep11in-bulk",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 11,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS11,
+		.reg_uddr	= &UDDR11,
+		drcmr (35)
+	},
+	.ep[12] = {
+		.ep = {
+			.name		= "ep12out-bulk",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = 12,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS12,
+		.reg_ubcr	= &UBCR12,
+		.reg_uddr	= &UDDR12,
+		drcmr (36)
+	},
+	.ep[13] = {
+		.ep = {
+			.name		= "ep13in-iso",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 13,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS13,
+		.reg_uddr	= &UDDR13,
+		drcmr (37)
+	},
+	.ep[14] = {
+		.ep = {
+			.name		= "ep14out-iso",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = 14,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS14,
+		.reg_ubcr	= &UBCR14,
+		.reg_uddr	= &UDDR14,
+		drcmr (38)
+	},
+	.ep[15] = {
+		.ep = {
+			.name		= "ep15in-int",
+			.ops		= &pxa2xx_ep_ops,
+			.maxpacket	= INT_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= INT_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 15,
+		.bmAttributes	= USB_ENDPOINT_XFER_INT,
+		.reg_udccs	= &UDCCS15,
+		.reg_uddr	= &UDDR15,
+	},
+#endif /* !CONFIG_USB_PXA2XX_SMALL */
+};
+
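+/* decode of cp15 register 0 (the ARM main ID register) as read in probe():
+ * bits 31..13 identify the vendor, bits 9..4 the product, bits 3..0 the
+ * stepping/revision; the masks below select those fields.
+ */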
+#define CP15R0_VENDOR_MASK	0xffffe000
+
+#if	defined(CONFIG_ARCH_PXA)
+#define CP15R0_XSCALE_VALUE	0x69052000	/* intel/arm/xscale */
+
+#elif	defined(CONFIG_ARCH_IXP4XX)
+#define CP15R0_XSCALE_VALUE	0x69054000	/* intel/arm/ixp4xx */
+
+#endif
+
+#define CP15R0_PROD_MASK	0x000003f0
+#define PXA25x			0x00000100	/* and PXA26x */
+#define PXA210			0x00000120
+
+#define CP15R0_REV_MASK		0x0000000f
+
+#define CP15R0_PRODREV_MASK	(CP15R0_PROD_MASK | CP15R0_REV_MASK)
+
+#define PXA255_A0		0x00000106	/* or PXA260_B1 */
+#define PXA250_C0		0x00000105	/* or PXA26x_B0 */
+#define PXA250_B2		0x00000104
+#define PXA250_B1		0x00000103	/* or PXA260_A0 */
+#define PXA250_B0		0x00000102
+#define PXA250_A1		0x00000101
+#define PXA250_A0		0x00000100
+
+#define PXA210_C0		0x00000125
+#define PXA210_B2		0x00000124
+#define PXA210_B1		0x00000123
+#define PXA210_B0		0x00000122
+#define IXP425_A0		0x000001c1
+
+/*
+ * 	probe - binds to the platform device
+ */
+static int __init pxa2xx_udc_probe(struct device *_dev)
+{
+	struct pxa2xx_udc *dev = &memory;
+	int retval, out_dma = 1;
+	u32 chiprev;
+
+	/* insist on Intel/ARM/XScale */
+	asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
+	if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
+		printk(KERN_ERR "%s: not XScale!\n", driver_name);
+		return -ENODEV;
+	}
+
+	/* trigger chiprev-specific logic */
+	switch (chiprev & CP15R0_PRODREV_MASK) {
+#if	defined(CONFIG_ARCH_PXA)
+	case PXA255_A0:
+		dev->has_cfr = 1;
+		break;
+	case PXA250_A0:
+	case PXA250_A1:
+		/* A0/A1 "not released"; ep 13, 15 unusable */
+		/* fall through */
+	case PXA250_B2: case PXA210_B2:
+	case PXA250_B1: case PXA210_B1:
+	case PXA250_B0: case PXA210_B0:
+		out_dma = 0;
+		/* fall through */
+	case PXA250_C0: case PXA210_C0:
+		break;
+#elif	defined(CONFIG_ARCH_IXP4XX)
+	case IXP425_A0:
+		out_dma = 0;
+		break;
+#endif
+	default:
+		out_dma = 0;
+		printk(KERN_ERR "%s: unrecognized processor: %08x\n",
+			driver_name, chiprev);
+		/* iop3xx, ixp4xx, ... */
+		return -ENODEV;
+	}
+
+	pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
+		dev->has_cfr ? "" : " (!cfr)",
+		out_dma ? "" : " (broken dma-out)",
+		SIZE_STR DMASTR
+		);
+
+#ifdef	USE_DMA
+#ifndef	USE_OUT_DMA
+	out_dma = 0;
+#endif
+	/* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
+	if (!out_dma) {
+		DMSG("disabled OUT dma\n");
+		dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
+		dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
+		dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
+	}
+#endif
+
+	/* other non-static parts of init */
+	dev->dev = _dev;
+	dev->mach = _dev->platform_data;
+
+	init_timer(&dev->timer);
+	dev->timer.function = udc_watchdog;
+	dev->timer.data = (unsigned long) dev;
+
+	device_initialize(&dev->gadget.dev);
+	dev->gadget.dev.parent = _dev;
+	dev->gadget.dev.dma_mask = _dev->dma_mask;
+
+	the_controller = dev;
+	dev_set_drvdata(_dev, dev);
+
+	udc_disable(dev);
+	udc_reinit(dev);
+
+	dev->vbus = is_usb_connected();
+
+	/* irq setup after old hardware state is cleaned up */
+	retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
+			SA_INTERRUPT, driver_name, dev);
+	if (retval != 0) {
+		printk(KERN_ERR "%s: can't get irq %i, err %d\n",
+			driver_name, IRQ_USB, retval);
+		return -EBUSY;
+	}
+	dev->got_irq = 1;
+
+#ifdef CONFIG_ARCH_LUBBOCK
+	if (machine_is_lubbock()) {
+		retval = request_irq(LUBBOCK_USB_DISC_IRQ,
+				lubbock_vbus_irq,
+				SA_INTERRUPT | SA_SAMPLE_RANDOM,
+				driver_name, dev);
+		if (retval != 0) {
+			printk(KERN_ERR "%s: can't get irq %i, err %d\n",
+				driver_name, LUBBOCK_USB_DISC_IRQ, retval);
+lubbock_fail0:
+			free_irq(IRQ_USB, dev);
+			return -EBUSY;
+		}
+		retval = request_irq(LUBBOCK_USB_IRQ,
+				lubbock_vbus_irq,
+				SA_INTERRUPT | SA_SAMPLE_RANDOM,
+				driver_name, dev);
+		if (retval != 0) {
+			printk(KERN_ERR "%s: can't get irq %i, err %d\n",
+				driver_name, LUBBOCK_USB_IRQ, retval);
+			free_irq(LUBBOCK_USB_DISC_IRQ, dev);
+			goto lubbock_fail0;
+		}
+#ifdef DEBUG
+		/* with U-Boot (but not BLOB), hex is off by default */
+		HEX_DISPLAY(dev->stats.irqs);
+		LUB_DISC_BLNK_LED &= 0xff;
+#endif
+	}
+#endif
+	create_proc_files();
+
+	return 0;
+}
+static int __exit pxa2xx_udc_remove(struct device *_dev)
+{
+	struct pxa2xx_udc *dev = dev_get_drvdata(_dev);
+
+	udc_disable(dev);
+	remove_proc_files();
+	usb_gadget_unregister_driver(dev->driver);
+
+	if (dev->got_irq) {
+		free_irq(IRQ_USB, dev);
+		dev->got_irq = 0;
+	}
+	if (machine_is_lubbock()) {
+		free_irq(LUBBOCK_USB_DISC_IRQ, dev);
+		free_irq(LUBBOCK_USB_IRQ, dev);
+	}
+	dev_set_drvdata(_dev, NULL);
+	the_controller = NULL;
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+
+/* USB suspend (controlled by the host) and system suspend (controlled
+ * by the PXA) don't necessarily work well together.  If USB is active,
+ * the 48 MHz clock is required; so the system can't enter 33 MHz idle
+ * mode, or any deeper PM saving state.
+ *
+ * For now, we punt and forcibly disconnect from the USB host when PXA
+ * enters any suspend state.  While we're disconnected, we always disable
+ * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states. 
+ * Boards without software pullup control shouldn't use those states.
+ * VBUS IRQs should probably be ignored so that the PXA device just acts
+ * "dead" to USB hosts until system resume.
+ */
+static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level)
+{
+	struct pxa2xx_udc	*udc = dev_get_drvdata(dev);
+
+	if (level == SUSPEND_POWER_DOWN) {
+		if (!udc->mach->udc_command)
+			WARN("USB host won't detect disconnect!\n");
+		pullup(udc, 0);
+	}
+	return 0;
+}
+
+static int pxa2xx_udc_resume(struct device *dev, u32 level)
+{
+	struct pxa2xx_udc	*udc = dev_get_drvdata(dev);
+
+	if (level == RESUME_POWER_ON)
+		pullup(udc, 1);
+	return 0;
+}
+
+#else
+#define	pxa2xx_udc_suspend	NULL
+#define	pxa2xx_udc_resume	NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static struct device_driver udc_driver = {
+	.name		= "pxa2xx-udc",
+	.bus		= &platform_bus_type,
+	.probe		= pxa2xx_udc_probe,
+	.remove		= __exit_p(pxa2xx_udc_remove),
+	.suspend	= pxa2xx_udc_suspend,
+	.resume		= pxa2xx_udc_resume,
+};
+
+static int __init udc_init(void)
+{
+	printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
+	return driver_register(&udc_driver);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
new file mode 100644
index 0000000..1f3a7d9
--- /dev/null
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -0,0 +1,320 @@
+/*
+ * linux/drivers/usb/gadget/pxa2xx_udc.h
+ * Intel PXA2xx on-chip full speed USB device controller
+ *
+ * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
+ * Copyright (C) 2003 David Brownell
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __LINUX_USB_GADGET_PXA2XX_H
+#define __LINUX_USB_GADGET_PXA2XX_H
+
+#include <linux/types.h>
+
+/*-------------------------------------------------------------------------*/
+
+/* pxa2xx has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
+#define UFNRH_SIR	(1 << 7)	/* SOF interrupt request */
+#define UFNRH_SIM	(1 << 6)	/* SOF interrupt mask */
+#define UFNRH_IPE14	(1 << 5)	/* ISO packet error, ep14 */
+#define UFNRH_IPE9	(1 << 4)	/* ISO packet error, ep9 */
+#define UFNRH_IPE4	(1 << 3)	/* ISO packet error, ep4 */
+
+/* pxa255 has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
+#define	UDCCFR		UDC_RES2	/* UDC Control Function Register */
+#define UDCCFR_AREN	(1 << 7)	/* ACK response enable (now) */
+#define UDCCFR_ACM	(1 << 2)	/* ACK control mode (wait for AREN) */
+
+/* latest pxa255 errata define new "must be one" bits in UDCCFR */
+#define	UDCCFR_MB1	(0xff & ~(UDCCFR_AREN|UDCCFR_ACM))
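+/* i.e. UDCCFR_MB1 == 0x7b: every UDCCFR bit except AREN and ACM */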
+
+/*-------------------------------------------------------------------------*/
+
+struct pxa2xx_udc;
+
+struct pxa2xx_ep {
+	struct usb_ep				ep;
+	struct pxa2xx_udc			*dev;
+
+	const struct usb_endpoint_descriptor	*desc;
+	struct list_head			queue;
+	unsigned long				pio_irqs;
+	unsigned long				dma_irqs;
+	short					dma; 
+
+	unsigned short				fifo_size;
+	u8					bEndpointAddress;
+	u8					bmAttributes;
+
+	unsigned				stopped : 1;
+	unsigned				dma_fixup : 1;
+
+	/* UDCCS = UDC Control/Status for this EP
+	 * UBCR = UDC Byte Count Remaining (contents of OUT fifo)
+	 * UDDR = UDC Endpoint Data Register (the fifo)
+	 * DRCM = DMA Request Channel Map
+	 */
+	volatile u32				*reg_udccs;
+	volatile u32				*reg_ubcr;
+	volatile u32				*reg_uddr;
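+	/* drcmr(n) below expands, when USE_DMA is defined, to the designated
+	 * initializer ".reg_drcmr = &DRCMRn," used by the static endpoint
+	 * table in pxa2xx_udc.c; otherwise it expands to nothing.
+	 */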
+#ifdef USE_DMA
+	volatile u32				*reg_drcmr;
+#define	drcmr(n)  .reg_drcmr = & DRCMR ## n ,
+#else
+#define	drcmr(n)  
+#endif
+};
+
+struct pxa2xx_request {
+	struct usb_request			req;
+	struct list_head			queue;
+};
+
+enum ep0_state { 
+	EP0_IDLE,
+	EP0_IN_DATA_PHASE,
+	EP0_OUT_DATA_PHASE,
+	EP0_END_XFER,
+	EP0_STALL,
+};
+
+#define EP0_FIFO_SIZE	((unsigned)16)
+#define BULK_FIFO_SIZE	((unsigned)64)
+#define ISO_FIFO_SIZE	((unsigned)256)
+#define INT_FIFO_SIZE	((unsigned)8)
+
+struct udc_stats {
+	struct ep0stats {
+		unsigned long		ops;
+		unsigned long		bytes;
+	} read, write;
+	unsigned long			irqs;
+};
+
+#ifdef CONFIG_USB_PXA2XX_SMALL
+/* when memory's tight, SMALL config saves code+data.  */
+#undef	USE_DMA
+#define	PXA_UDC_NUM_ENDPOINTS	3
+#endif
+
+#ifndef	PXA_UDC_NUM_ENDPOINTS
+#define	PXA_UDC_NUM_ENDPOINTS	16
+#endif
+
+struct pxa2xx_udc {
+	struct usb_gadget			gadget;
+	struct usb_gadget_driver		*driver;
+
+	enum ep0_state				ep0state;
+	struct udc_stats			stats;
+	unsigned				got_irq : 1,
+						vbus : 1,
+						pullup : 1,
+						has_cfr : 1,
+						req_pending : 1,
+						req_std : 1,
+						req_config : 1;
+
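+	/* re-arm the ep0 stall watchdog roughly 5 msec out: HZ/200 jiffies,
+	 * which rounds down to "next tick" when HZ is below 200
+	 */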
+#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
+	struct timer_list			timer;
+
+	struct device				*dev;
+	struct pxa2xx_udc_mach_info		*mach;
+	u64					dma_mask;
+	struct pxa2xx_ep			ep [PXA_UDC_NUM_ENDPOINTS];
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_LUBBOCK
+#include <asm/arch/lubbock.h>
+/* lubbock can also report usb connect/disconnect irqs */
+
+#ifdef DEBUG
+#define HEX_DISPLAY(n)	do { if (machine_is_lubbock()) LUB_HEXLED = (n); } while(0)
+#endif
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* LEDs are only for debug */
+#ifndef HEX_DISPLAY
+#define HEX_DISPLAY(n)		do {} while(0)
+#endif
+
+#ifdef DEBUG
+#include <asm/leds.h>
+
+#define LED_CONNECTED_ON	leds_event(led_green_on)
+#define LED_CONNECTED_OFF	do { \
+					leds_event(led_green_off); \
+					HEX_DISPLAY(0); \
+				} while(0)
+#endif
+
+#ifndef LED_CONNECTED_ON
+#define LED_CONNECTED_ON	do {} while(0)
+#define LED_CONNECTED_OFF	do {} while(0)
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static struct pxa2xx_udc *the_controller;
+
+/* one GPIO should be used to detect host disconnect */
+static inline int is_usb_connected(void)
+{
+	if (!the_controller->mach->udc_is_connected)
+		return 1;
+	return the_controller->mach->udc_is_connected();
+}
+
+/* one GPIO should force the host to see this device (or not) */
+static inline void make_usb_disappear(void)
+{
+	if (!the_controller->mach->udc_command)
+		return;
+	the_controller->mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
+}
+
+static inline void let_usb_appear(void)
+{
+	if (!the_controller->mach->udc_command)
+		return;
+	the_controller->mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Debugging support vanishes in non-debug builds.  DBG_NORMAL should be
+ * mostly silent during normal use/testing, with no timing side-effects.
+ */
+#define DBG_NORMAL	1	/* error paths, device state transitions */
+#define DBG_VERBOSE	2	/* add some success path trace info */
+#define DBG_NOISY	3	/* ... even more: request level */
+#define DBG_VERY_NOISY	4	/* ... even more: packet level */
+
+#ifdef DEBUG
+
+static const char *state_name[] = {
+	"EP0_IDLE",
+	"EP0_IN_DATA_PHASE", "EP0_OUT_DATA_PHASE",
+	"EP0_END_XFER", "EP0_STALL"
+};
+
+#define DMSG(stuff...) printk(KERN_DEBUG "udc: " stuff)
+
+#ifdef VERBOSE
+#    define UDC_DEBUG DBG_VERBOSE
+#else
+#    define UDC_DEBUG DBG_NORMAL
+#endif
+
+static void __attribute__ ((__unused__))
+dump_udccr(const char *label)
+{
+	u32	udccr = UDCCR;
+	DMSG("%s %02X =%s%s%s%s%s%s%s%s\n",
+		label, udccr,
+		(udccr & UDCCR_REM) ? " rem" : "",
+		(udccr & UDCCR_RSTIR) ? " rstir" : "",
+		(udccr & UDCCR_SRM) ? " srm" : "",
+		(udccr & UDCCR_SUSIR) ? " susir" : "",
+		(udccr & UDCCR_RESIR) ? " resir" : "",
+		(udccr & UDCCR_RSM) ? " rsm" : "",
+		(udccr & UDCCR_UDA) ? " uda" : "",
+		(udccr & UDCCR_UDE) ? " ude" : "");
+}
+
+static void __attribute__ ((__unused__))
+dump_udccs0(const char *label)
+{
+	u32		udccs0 = UDCCS0;
+
+	DMSG("%s %s %02X =%s%s%s%s%s%s%s%s\n",
+		label, state_name[the_controller->ep0state], udccs0,
+		(udccs0 & UDCCS0_SA) ? " sa" : "",
+		(udccs0 & UDCCS0_RNE) ? " rne" : "",
+		(udccs0 & UDCCS0_FST) ? " fst" : "",
+		(udccs0 & UDCCS0_SST) ? " sst" : "",
+		(udccs0 & UDCCS0_DRWF) ? " drwf" : "",
+		(udccs0 & UDCCS0_FTF) ? " ftf" : "",
+		(udccs0 & UDCCS0_IPR) ? " ipr" : "",
+		(udccs0 & UDCCS0_OPR) ? " opr" : "");
+}
+
+static void __attribute__ ((__unused__))
+dump_state(struct pxa2xx_udc *dev)
+{
+	u32		tmp;
+	unsigned	i;
+
+	DMSG("%s %s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
+		is_usb_connected() ? "host " : "disconnected",
+		state_name[dev->ep0state],
+		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
+	dump_udccr("udccr");
+	if (dev->has_cfr) {
+		tmp = UDCCFR;
+		DMSG("udccfr %02X =%s%s\n", tmp,
+			(tmp & UDCCFR_AREN) ? " aren" : "",
+			(tmp & UDCCFR_ACM) ? " acm" : "");
+	}
+
+	if (!dev->driver) {
+		DMSG("no gadget driver bound\n");
+		return;
+	} else
+		DMSG("ep0 driver '%s'\n", dev->driver->driver.name);
+
+	if (!is_usb_connected())
+		return;
+
+	dump_udccs0 ("udccs0");
+	DMSG("ep0 IN %lu/%lu, OUT %lu/%lu\n",
+		dev->stats.write.bytes, dev->stats.write.ops,
+		dev->stats.read.bytes, dev->stats.read.ops);
+
+	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		if (dev->ep [i].desc == 0)
+			continue;
+		DMSG ("udccs%d = %02x\n", i, *dev->ep [i].reg_udccs);
+	}
+}
+
+#else
+
+#define DMSG(stuff...)		do{}while(0)
+
+#define	dump_udccr(x)	do{}while(0)
+#define	dump_udccs0(x)	do{}while(0)
+#define	dump_state(x)	do{}while(0)
+
+#define UDC_DEBUG ((unsigned)0)
+
+#endif
+
+#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
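+/* e.g. (illustrative) DBG(DBG_NOISY, "%s queued\n", ep->ep.name);
+ * messages above the compile-time UDC_DEBUG level are discarded.
+ */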
+
+#define WARN(stuff...) printk(KERN_WARNING "udc: " stuff)
+#define INFO(stuff...) printk(KERN_INFO "udc: " stuff)
+
+
+#endif /* __LINUX_USB_GADGET_PXA2XX_H */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
new file mode 100644
index 0000000..6c51978
--- /dev/null
+++ b/drivers/usb/gadget/rndis.c
@@ -0,0 +1,1428 @@
+/* 
+ * RNDIS MSG parser
+ * 
+ * Version:     $Id: rndis.c,v 1.19 2004/03/25 21:33:46 robert Exp $
+ * 
+ * Authors:	Benedikt Spranger, Pengutronix
+ * 		Robert Schwebel, Pengutronix
+ * 
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              version 2, as published by the Free Software Foundation. 
+ * 
+ *		This software was originally developed in conformance with
+ *		Microsoft's Remote NDIS Specification License Agreement.
+ *              
+ * 03/12/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
+ *		Fixed message length bug in init_response
+ * 
+ * 03/25/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
+ * 		Fixed rndis_rm_hdr length bug.
+ *
+ * Copyright (C) 2004 by David Brownell
+ *		updates to merge with Linux 2.6, better match RNDIS spec
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/netdevice.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+
+
+#undef	RNDIS_PM
+#undef	VERBOSE
+
+#include "rndis.h"
+
+
+/* The driver for your USB chip needs to support ep0 OUT to work with
+ * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
+ *
+ * Windows hosts need an INF file like Documentation/usb/linux.inf
+ * and will be happier if you provide the host_addr module parameter.
+ */
+
+#if 0
+#define DEBUG(str,args...) do { \
+	if (rndis_debug) \
+		printk(KERN_DEBUG str , ## args ); \
+	} while (0)
+static int rndis_debug = 0;
+
+module_param (rndis_debug, bool, 0);
+MODULE_PARM_DESC (rndis_debug, "enable debugging");
+
+#else
+
+#define rndis_debug		0
+#define DEBUG(str,args...)	do{}while(0)
+#endif
+
+#define RNDIS_MAX_CONFIGS	1
+
+
+static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS];
+
+/* Driver Version */
+static const __le32 rndis_driver_version = __constant_cpu_to_le32 (1);
+
+/* Function Prototypes */
+static int rndis_init_response (int configNr, rndis_init_msg_type *buf);
+static int rndis_query_response (int configNr, rndis_query_msg_type *buf);
+static int rndis_set_response (int configNr, rndis_set_msg_type *buf);
+static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf);
+static int rndis_keepalive_response (int configNr, 
+				     rndis_keepalive_msg_type *buf);
+
+static rndis_resp_t *rndis_add_response (int configNr, u32 length);
+
+
+/* NDIS Functions */
+static int gen_ndis_query_resp (int configNr, u32 OID, rndis_resp_t *r)
+{
+	int 			retval = -ENOTSUPP;
+	u32 			length = 0;
+	__le32			*tmp;
+	int			i, count;
+	rndis_query_cmplt_type	*resp;
+
+	if (!r) return -ENOMEM;
+	resp = (rndis_query_cmplt_type *) r->buf;
+
+	if (!resp) return -ENOMEM;
+	
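+	/* Answers are built in place.  The rndis_query_cmplt_type header
+	 * is six 32-bit words (24 bytes), so "(__le32 *) resp + 6" and
+	 * "(u8 *) resp + 24" both address the InformationBuffer that
+	 * follows it; the InformationBufferOffset reported below (16) is
+	 * counted from the RequestID field, i.e. 8 + 16 == 24.
+	 */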
+	switch (OID) {
+
+	/* general oids (table 4-1) */
+
+	/* mandatory */
+	case OID_GEN_SUPPORTED_LIST:
+		DEBUG ("%s: OID_GEN_SUPPORTED_LIST\n", __FUNCTION__);
+		length = sizeof (oid_supported_list);
+		count  = length / sizeof (u32);
+		tmp = (__le32 *) ((u8 *)resp + 24);
+		for (i = 0; i < count; i++)
+			tmp[i] = cpu_to_le32 (oid_supported_list[i]);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_GEN_HARDWARE_STATUS:
+		DEBUG("%s: OID_GEN_HARDWARE_STATUS\n", __FUNCTION__);
+		length = 4;
+		/* Bogus question! 
+		 * Hardware must be ready to receive high level protocols.
+		 * BTW: 
+		 * reddite ergo quae sunt Caesaris Caesari
+		 * et quae sunt Dei Deo!
+		 */
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_GEN_MEDIA_SUPPORTED:
+		DEBUG("%s: OID_GEN_MEDIA_SUPPORTED\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = cpu_to_le32 (
+					rndis_per_dev_params [configNr].medium);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_GEN_MEDIA_IN_USE:
+		DEBUG("%s: OID_GEN_MEDIA_IN_USE\n", __FUNCTION__);
+		length = 4;
+		/* one medium, one transport... (maybe you do it better) */
+		*((__le32 *) resp + 6) = cpu_to_le32 (
+					rndis_per_dev_params [configNr].medium);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_GEN_MAXIMUM_FRAME_SIZE:
+		DEBUG("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].dev) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr].dev->mtu);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_GEN_LINK_SPEED:
+		DEBUG("%s: OID_GEN_LINK_SPEED\n", __FUNCTION__);
+		length = 4;
+		if (rndis_per_dev_params [configNr].media_state
+			== NDIS_MEDIA_STATE_DISCONNECTED)
+		    *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		else
+		    *((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr].speed);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_TRANSMIT_BLOCK_SIZE:
+		DEBUG("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].dev) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_GEN_RECEIVE_BLOCK_SIZE:
+		DEBUG("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].dev) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_GEN_VENDOR_ID:
+		DEBUG("%s: OID_GEN_VENDOR_ID\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = cpu_to_le32 (
+			rndis_per_dev_params [configNr].vendorID);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_GEN_VENDOR_DESCRIPTION:
+		DEBUG("%s: OID_GEN_VENDOR_DESCRIPTION\n", __FUNCTION__);
+		length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+		memcpy ((u8 *) resp + 24, 
+			rndis_per_dev_params [configNr].vendorDescr, length);
+		retval = 0;
+		break;
+
+	case OID_GEN_VENDOR_DRIVER_VERSION:
+		DEBUG("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __FUNCTION__);
+		length = 4;
+		/* Created as LE */
+		*((__le32 *) resp + 6) = rndis_driver_version;
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_CURRENT_PACKET_FILTER:
+		DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = cpu_to_le32 (
+					rndis_per_dev_params[configNr].filter);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MAXIMUM_TOTAL_SIZE:
+		DEBUG("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32(
+					RNDIS_MAX_TOTAL_SIZE);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MEDIA_CONNECT_STATUS:
+		DEBUG("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = cpu_to_le32 (
+					rndis_per_dev_params [configNr]
+						.media_state);
+		retval = 0;
+		break;
+
+	case OID_GEN_PHYSICAL_MEDIUM:
+		DEBUG("%s: OID_GEN_PHYSICAL_MEDIUM\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+
+	/* The RNDIS specification is incomplete/wrong.   Some versions
+	 * of MS-Windows expect OIDs that aren't specified there.  Other
+	 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
+	 */
+	case OID_GEN_MAC_OPTIONS:		/* from WinME */
+		DEBUG("%s: OID_GEN_MAC_OPTIONS\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32(
+			  NDIS_MAC_OPTION_RECEIVE_SERIALIZED
+			| NDIS_MAC_OPTION_FULL_DUPLEX);
+		retval = 0;
+		break;
+
+	/* statistics OIDs (table 4-2) */
+
+	/* mandatory */
+	case OID_GEN_XMIT_OK:
+		DEBUG("%s: OID_GEN_XMIT_OK\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+			    rndis_per_dev_params [configNr].stats->tx_packets - 
+			    rndis_per_dev_params [configNr].stats->tx_errors -
+			    rndis_per_dev_params [configNr].stats->tx_dropped);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_RCV_OK:
+		DEBUG("%s: OID_GEN_RCV_OK\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+			    rndis_per_dev_params [configNr].stats->rx_packets - 
+			    rndis_per_dev_params [configNr].stats->rx_errors -
+			    rndis_per_dev_params [configNr].stats->rx_dropped);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_GEN_XMIT_ERROR:
+		DEBUG("%s: OID_GEN_XMIT_ERROR\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->tx_errors);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_GEN_RCV_ERROR:
+		DEBUG("%s: OID_GEN_RCV_ERROR\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->rx_errors);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_GEN_RCV_NO_BUFFER:
+		DEBUG("%s: OID_GEN_RCV_NO_BUFFER\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->rx_dropped);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+
+#ifdef	RNDIS_OPTIONAL_STATS
+	case OID_GEN_DIRECTED_BYTES_XMIT:
+		DEBUG("%s: OID_GEN_DIRECTED_BYTES_XMIT\n", __FUNCTION__);
+		/* 
+		 * Aunt Tilly's size of shoes
+		 * minus antarctica count of penguins
+		 * divided by weight of Alpha Centauri
+		 */
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				(rndis_per_dev_params [configNr]
+					.stats->tx_packets - 
+				 rndis_per_dev_params [configNr]
+					 .stats->tx_errors -
+				 rndis_per_dev_params [configNr]
+					 .stats->tx_dropped)
+				* 123);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_DIRECTED_FRAMES_XMIT:
+		DEBUG("%s: OID_GEN_DIRECTED_FRAMES_XMIT\n", __FUNCTION__);
+		/* ditto */
+		if (rndis_per_dev_params [configNr].stats) {
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				(rndis_per_dev_params [configNr]
+					.stats->tx_packets - 
+				 rndis_per_dev_params [configNr]
+					 .stats->tx_errors -
+				 rndis_per_dev_params [configNr]
+					 .stats->tx_dropped)
+				/ 123);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_MULTICAST_BYTES_XMIT:
+		DEBUG("%s: OID_GEN_MULTICAST_BYTES_XMIT\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->multicast*1234);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_MULTICAST_FRAMES_XMIT:
+		DEBUG("%s: OID_GEN_MULTICAST_FRAMES_XMIT\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->multicast);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_BROADCAST_BYTES_XMIT:
+		DEBUG("%s: OID_GEN_BROADCAST_BYTES_XMIT\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->tx_packets/42*255);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_BROADCAST_FRAMES_XMIT:
+		DEBUG("%s: OID_GEN_BROADCAST_FRAMES_XMIT\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->tx_packets/42);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_DIRECTED_BYTES_RCV:
+		DEBUG("%s: OID_GEN_DIRECTED_BYTES_RCV\n", __FUNCTION__);
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+		
+	case OID_GEN_DIRECTED_FRAMES_RCV:
+		DEBUG("%s: OID_GEN_DIRECTED_FRAMES_RCV\n", __FUNCTION__);
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+		
+	case OID_GEN_MULTICAST_BYTES_RCV:
+		DEBUG("%s: OID_GEN_MULTICAST_BYTES_RCV\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->multicast * 1111);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_MULTICAST_FRAMES_RCV:
+		DEBUG("%s: OID_GEN_MULTICAST_FRAMES_RCV\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->multicast);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_BROADCAST_BYTES_RCV:
+		DEBUG("%s: OID_GEN_BROADCAST_BYTES_RCV\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->rx_packets/42*255);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_BROADCAST_FRAMES_RCV:
+		DEBUG("%s: OID_GEN_BROADCAST_FRAMES_RCV\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->rx_packets/42);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_RCV_CRC_ERROR:
+		DEBUG("%s: OID_GEN_RCV_CRC_ERROR\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats) {
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->rx_crc_errors);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	case OID_GEN_TRANSMIT_QUEUE_LENGTH:
+		DEBUG("%s: OID_GEN_TRANSMIT_QUEUE_LENGTH\n", __FUNCTION__);
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+	/* ieee802.3 OIDs (table 4-3) */
+
+	/* mandatory */
+	case OID_802_3_PERMANENT_ADDRESS:
+		DEBUG("%s: OID_802_3_PERMANENT_ADDRESS\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].dev) {
+			length = ETH_ALEN;
+			memcpy ((u8 *) resp + 24,
+				rndis_per_dev_params [configNr].host_mac,
+				length);
+			retval = 0;
+		} else {
+			*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_802_3_CURRENT_ADDRESS:
+		DEBUG("%s: OID_802_3_CURRENT_ADDRESS\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].dev) {
+			length = ETH_ALEN;
+			memcpy ((u8 *) resp + 24,
+				rndis_per_dev_params [configNr].host_mac,
+				length);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_802_3_MULTICAST_LIST:
+		DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
+		length = 4;
+		/* Multicast base address only */
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0xE0000000);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_802_3_MAXIMUM_LIST_SIZE:
+		DEBUG("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __FUNCTION__);
+		length = 4;
+		/* Multicast base address only */
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (1);
+		retval = 0;
+		break;
+		
+	case OID_802_3_MAC_OPTIONS:
+		DEBUG("%s: OID_802_3_MAC_OPTIONS\n", __FUNCTION__);
+		break;
+
+	/* ieee802.3 statistics OIDs (table 4-4) */
+
+	/* mandatory */
+	case OID_802_3_RCV_ERROR_ALIGNMENT:
+		DEBUG("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __FUNCTION__);
+		if (rndis_per_dev_params [configNr].stats)
+		{
+			length = 4;
+			*((__le32 *) resp + 6) = cpu_to_le32 (
+				rndis_per_dev_params [configNr]
+					.stats->rx_frame_errors);
+			retval = 0;
+		}
+		break;
+		
+	/* mandatory */
+	case OID_802_3_XMIT_ONE_COLLISION:
+		DEBUG("%s: OID_802_3_XMIT_ONE_COLLISION\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+		
+	/* mandatory */
+	case OID_802_3_XMIT_MORE_COLLISIONS:
+		DEBUG("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __FUNCTION__);
+		length = 4;
+		*((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
+		retval = 0;
+		break;
+		
+#ifdef	RNDIS_OPTIONAL_STATS
+	case OID_802_3_XMIT_DEFERRED:
+		DEBUG("%s: OID_802_3_XMIT_DEFERRED\n", __FUNCTION__);
+		/* TODO */
+		break;
+		
+	case OID_802_3_XMIT_MAX_COLLISIONS:
+		DEBUG("%s: OID_802_3_XMIT_MAX_COLLISIONS\n", __FUNCTION__);
+		/* TODO */
+		break;
+		
+	case OID_802_3_RCV_OVERRUN:
+		DEBUG("%s: OID_802_3_RCV_OVERRUN\n", __FUNCTION__);
+		/* TODO */
+		break;
+		
+	case OID_802_3_XMIT_UNDERRUN:
+		DEBUG("%s: OID_802_3_XMIT_UNDERRUN\n", __FUNCTION__);
+		/* TODO */
+		break;
+		
+	case OID_802_3_XMIT_HEARTBEAT_FAILURE:
+		DEBUG("%s: OID_802_3_XMIT_HEARTBEAT_FAILURE\n", __FUNCTION__);
+		/* TODO */
+		break;
+		
+	case OID_802_3_XMIT_TIMES_CRS_LOST:
+		DEBUG("%s: OID_802_3_XMIT_TIMES_CRS_LOST\n", __FUNCTION__);
+		/* TODO */
+		break;
+		
+	case OID_802_3_XMIT_LATE_COLLISIONS:
+		DEBUG("%s: OID_802_3_XMIT_LATE_COLLISIONS\n", __FUNCTION__);
+		/* TODO */
+		break;		
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+#ifdef	RNDIS_PM
+	/* power management OIDs (table 4-5) */
+	case OID_PNP_CAPABILITIES:
+		DEBUG("%s: OID_PNP_CAPABILITIES\n", __FUNCTION__);
+
+		/* just PM, and remote wakeup on link status change
+		 * (not magic packet or pattern match)
+		 */
+		length = sizeof (struct NDIS_PNP_CAPABILITIES);
+		memset (resp, 0, length);
+		{
+			struct NDIS_PNP_CAPABILITIES *caps = (void *) resp;
+
+			caps->Flags = NDIS_DEVICE_WAKE_UP_ENABLE;
+			caps->WakeUpCapabilities.MinLinkChangeWakeUp 
+				 = NdisDeviceStateD3;
+
+			/* FIXME then use usb_gadget_wakeup(), and
+			 * set USB_CONFIG_ATT_WAKEUP in config desc
+			 */
+		}
+		retval = 0;
+		break;
+	case OID_PNP_QUERY_POWER:
+		DEBUG("%s: OID_PNP_QUERY_POWER\n", __FUNCTION__);
+		/* sure, handle any power state that maps to USB suspend */
+		retval = 0;
+		break;
+#endif
+
+	default:
+		printk (KERN_WARNING "%s: query unknown OID 0x%08X\n", 
+			 __FUNCTION__, OID);
+	}
+	
+	resp->InformationBufferOffset = __constant_cpu_to_le32 (16);
+	resp->InformationBufferLength = cpu_to_le32 (length);
+	resp->MessageLength = cpu_to_le32 (24 + length);
+	r->length = 24 + length;
+	return retval;
+}
+
+static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len, 
+			      rndis_resp_t *r)
+{
+	rndis_set_cmplt_type		*resp;
+	int 				i, retval = -ENOTSUPP;
+	struct rndis_params		*params;
+
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_set_cmplt_type *) r->buf;
+	if (!resp)
+		return -ENOMEM;
+
+	DEBUG("set OID %08x value, len %d:\n", OID, buf_len);
+	for (i = 0; i < buf_len; i += 16) {
+		DEBUG ("%03d: "
+			" %02x %02x %02x %02x"
+			" %02x %02x %02x %02x"
+			" %02x %02x %02x %02x"
+			" %02x %02x %02x %02x"
+			"\n",
+			i,
+			buf[i], buf [i+1],
+				buf[i+2], buf[i+3],
+			buf[i+4], buf [i+5],
+				buf[i+6], buf[i+7],
+			buf[i+8], buf [i+9],
+				buf[i+10], buf[i+11],
+			buf[i+12], buf [i+13],
+				buf[i+14], buf[i+15]);
+	}
+
+	switch (OID) {
+	case OID_GEN_CURRENT_PACKET_FILTER:
+		params = &rndis_per_dev_params [configNr];
+		retval = 0;
+
+		/* FIXME use these NDIS_PACKET_TYPE_* bitflags to
+		 * filter packets in hard_start_xmit()
+		 * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
+		 *	PROMISCUOUS, DIRECTED,
+		 *	MULTICAST, ALL_MULTICAST, BROADCAST
+		 */
+		params->filter = le32_to_cpup((__le32 *)buf);
+		DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
+			__FUNCTION__, params->filter);
+
+		/* this call has a significant side effect:  it's
+		 * what makes the packet flow start and stop, like
+		 * activating the CDC Ethernet altsetting.
+		 */
+		if (params->filter) {
+			params->state = RNDIS_DATA_INITIALIZED;
+			netif_carrier_on(params->dev);
+			if (netif_running(params->dev))
+				netif_wake_queue (params->dev);
+		} else {
+			params->state = RNDIS_INITIALIZED;
+			netif_carrier_off (params->dev);
+			netif_stop_queue (params->dev);
+		}
+		break;
+		
+	case OID_802_3_MULTICAST_LIST:
+		/* I think we can ignore this */		
+		DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
+		retval = 0;
+		break;
+#if 0
+	case OID_GEN_RNDIS_CONFIG_PARAMETER:
+		{
+		struct rndis_config_parameter	*param;
+		param = (struct rndis_config_parameter *) buf;
+		DEBUG("%s: OID_GEN_RNDIS_CONFIG_PARAMETER '%*s'\n",
+			__FUNCTION__,
+			min(cpu_to_le32(param->ParameterNameLength),80),
+			buf + param->ParameterNameOffset);
+		retval = 0;
+		}
+		break;
+#endif
+
+#ifdef	RNDIS_PM
+	case OID_PNP_SET_POWER:
+		DEBUG ("OID_PNP_SET_POWER\n");
+		/* sure, handle any power state that maps to USB suspend */
+		retval = 0;
+		break;
+
+	case OID_PNP_ENABLE_WAKE_UP:
+		/* always-connected ... */
+		DEBUG ("OID_PNP_ENABLE_WAKE_UP\n");
+		retval = 0;
+		break;
+
+	// no PM resume patterns supported (specified where?)
+	// so OID_PNP_{ADD,REMOVE}_WAKE_UP_PATTERN always fails
+#endif
+
+	default:
+		printk (KERN_WARNING "%s: set unknown OID 0x%08X, size %d\n", 
+			 __FUNCTION__, OID, buf_len);
+	}
+	
+	return retval;
+}
+
+/* 
+ * Response Functions 
+ */
+
+static int rndis_init_response (int configNr, rndis_init_msg_type *buf)
+{
+	rndis_init_cmplt_type	*resp; 
+	rndis_resp_t            *r;
+	
+	if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
+	
+	r = rndis_add_response (configNr, sizeof (rndis_init_cmplt_type));
+	
+	if (!r) return -ENOMEM;
+	
+	resp = (rndis_init_cmplt_type *) r->buf;
+	
+	if (!resp) return -ENOMEM;
+	
+	resp->MessageType = __constant_cpu_to_le32 (
+			REMOTE_NDIS_INITIALIZE_CMPLT);
+	resp->MessageLength = __constant_cpu_to_le32 (52);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
+	resp->MajorVersion = __constant_cpu_to_le32 (RNDIS_MAJOR_VERSION);
+	resp->MinorVersion = __constant_cpu_to_le32 (RNDIS_MINOR_VERSION);
+	resp->DeviceFlags = __constant_cpu_to_le32 (RNDIS_DF_CONNECTIONLESS);
+	resp->Medium = __constant_cpu_to_le32 (RNDIS_MEDIUM_802_3);
+	resp->MaxPacketsPerTransfer = __constant_cpu_to_le32 (1);
+	resp->MaxTransferSize = cpu_to_le32 (
+		  rndis_per_dev_params [configNr].dev->mtu
+		+ sizeof (struct ethhdr)
+		+ sizeof (struct rndis_packet_msg_type)
+		+ 22);
+	resp->PacketAlignmentFactor = __constant_cpu_to_le32 (0);
+	resp->AFListOffset = __constant_cpu_to_le32 (0);
+	resp->AFListSize = __constant_cpu_to_le32 (0);
+	
+	if (rndis_per_dev_params [configNr].ack)
+	    rndis_per_dev_params [configNr].ack (
+	    		rndis_per_dev_params [configNr].dev);
+	
+	return 0;
+}
+
+static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
+{
+	rndis_query_cmplt_type *resp;
+	rndis_resp_t            *r;
+	
+	// DEBUG("%s: OID = %08X\n", __FUNCTION__, cpu_to_le32(buf->OID));
+	if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
+	
+	/* 
+	 * we need more memory: 
+	 * oid_supported_list is the largest answer 
+	 */
+	r = rndis_add_response (configNr, sizeof (oid_supported_list));
+	
+	if (!r) return -ENOMEM;
+	resp = (rndis_query_cmplt_type *) r->buf;
+	
+	if (!resp) return -ENOMEM;
+	
+	resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_QUERY_CMPLT);
+	resp->MessageLength = __constant_cpu_to_le32 (24);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	
+	if (gen_ndis_query_resp (configNr, le32_to_cpu (buf->OID), r)) {
+		/* OID not supported */
+		resp->Status = __constant_cpu_to_le32 (
+				RNDIS_STATUS_NOT_SUPPORTED);
+		resp->InformationBufferLength = __constant_cpu_to_le32 (0);
+		resp->InformationBufferOffset = __constant_cpu_to_le32 (0);
+	} else
+		resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
+	
+	if (rndis_per_dev_params [configNr].ack)
+	    rndis_per_dev_params [configNr].ack (
+	    		rndis_per_dev_params [configNr].dev);
+	return 0;
+}
+
+static int rndis_set_response (int configNr, rndis_set_msg_type *buf)
+{
+	u32			BufLength, BufOffset;
+	rndis_set_cmplt_type	*resp;
+	rndis_resp_t		*r;
+	
+	r = rndis_add_response (configNr, sizeof (rndis_set_cmplt_type));
+	
+	if (!r) return -ENOMEM;
+	resp = (rndis_set_cmplt_type *) r->buf;
+	if (!resp) return -ENOMEM;
+
+	BufLength = le32_to_cpu (buf->InformationBufferLength);
+	BufOffset = le32_to_cpu (buf->InformationBufferOffset);
+
+#ifdef	VERBOSE
+	DEBUG("%s: Length: %d\n", __FUNCTION__, BufLength);
+	DEBUG("%s: Offset: %d\n", __FUNCTION__, BufOffset);
+	DEBUG("%s: InfoBuffer: ", __FUNCTION__);
+	
+	for (i = 0; i < BufLength; i++) {
+		DEBUG ("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
+	}
+	
+	DEBUG ("\n");
+#endif
+	
+	resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_SET_CMPLT);
+	resp->MessageLength = __constant_cpu_to_le32 (16);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	if (gen_ndis_set_resp (configNr, le32_to_cpu (buf->OID), 
+			       ((u8 *) buf) + 8 + BufOffset, BufLength, r))
+	    resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_NOT_SUPPORTED);
+	else resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
+	
+	if (rndis_per_dev_params [configNr].ack)
+	    rndis_per_dev_params [configNr].ack (
+	    		rndis_per_dev_params [configNr].dev);
+	
+	return 0;
+}
+
+static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf)
+{
+	rndis_reset_cmplt_type	*resp;
+	rndis_resp_t		*r;
+	
+	r = rndis_add_response (configNr, sizeof (rndis_reset_cmplt_type));
+	
+	if (!r) return -ENOMEM;
+	resp = (rndis_reset_cmplt_type *) r->buf;
+	if (!resp) return -ENOMEM;
+	
+	resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_RESET_CMPLT);
+	resp->MessageLength = __constant_cpu_to_le32 (16);
+	resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
+	/* resend addressing information */
+	resp->AddressingReset = __constant_cpu_to_le32 (1);
+	
+	if (rndis_per_dev_params [configNr].ack)
+	    rndis_per_dev_params [configNr].ack (
+	    		rndis_per_dev_params [configNr].dev);
+
+	return 0;
+}
+
+static int rndis_keepalive_response (int configNr,
+				     rndis_keepalive_msg_type *buf)
+{
+	rndis_keepalive_cmplt_type	*resp;
+	rndis_resp_t			*r;
+
+	/* host "should" check only in RNDIS_DATA_INITIALIZED state */
+
+	r = rndis_add_response (configNr, sizeof (rndis_keepalive_cmplt_type));
+	if (!r) return -ENOMEM;
+	resp = (rndis_keepalive_cmplt_type *) r->buf;
+	if (!resp) return -ENOMEM;
+		
+	resp->MessageType = __constant_cpu_to_le32 (
+			REMOTE_NDIS_KEEPALIVE_CMPLT);
+	resp->MessageLength = __constant_cpu_to_le32 (16);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
+	
+	if (rndis_per_dev_params [configNr].ack)
+	    rndis_per_dev_params [configNr].ack (
+	    		rndis_per_dev_params [configNr].dev);
+	
+	return 0;
+}
+
+
+/* 
+ * Device to Host Communication
+ */
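+/* Builds and queues an unsolicited REMOTE_NDIS_INDICATE_STATUS_MSG;
+ * rndis_signal_connect() and rndis_signal_disconnect() use it to tell
+ * the host about link state changes.
+ */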
+static int rndis_indicate_status_msg (int configNr, u32 status)
+{
+	rndis_indicate_status_msg_type	*resp;	
+	rndis_resp_t			*r;
+	
+	if (rndis_per_dev_params [configNr].state == RNDIS_UNINITIALIZED)
+	    return -ENOTSUPP;
+	
+	r = rndis_add_response (configNr, 
+				sizeof (rndis_indicate_status_msg_type));
+	if (!r) return -ENOMEM;
+	
+	resp = (rndis_indicate_status_msg_type *) r->buf;
+	if (!resp) return -ENOMEM;
+	
+	resp->MessageType = __constant_cpu_to_le32 (
+			REMOTE_NDIS_INDICATE_STATUS_MSG);
+	resp->MessageLength = __constant_cpu_to_le32 (20);
+	resp->Status = cpu_to_le32 (status);
+	resp->StatusBufferLength = __constant_cpu_to_le32 (0);
+	resp->StatusBufferOffset = __constant_cpu_to_le32 (0);
+	
+	if (rndis_per_dev_params [configNr].ack) 
+	    rndis_per_dev_params [configNr].ack (
+	    		rndis_per_dev_params [configNr].dev);
+	return 0;
+}
+
+int rndis_signal_connect (int configNr)
+{
+	rndis_per_dev_params [configNr].media_state
+			= NDIS_MEDIA_STATE_CONNECTED;
+	return rndis_indicate_status_msg (configNr, 
+					  RNDIS_STATUS_MEDIA_CONNECT);
+}
+
+int rndis_signal_disconnect (int configNr)
+{
+	rndis_per_dev_params [configNr].media_state
+			= NDIS_MEDIA_STATE_DISCONNECTED;
+	return rndis_indicate_status_msg (configNr,
+					  RNDIS_STATUS_MEDIA_DISCONNECT);
+}
+
+void rndis_set_host_mac (int configNr, const u8 *addr)
+{
+	rndis_per_dev_params [configNr].host_mac = addr;
+}
+
+/* 
+ * Message Parser 
+ */
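+/* Dispatches one encapsulated command from the host:  decodes the
+ * little-endian MessageType/MessageLength and calls the matching
+ * *_response() helper, which queues the completion message.  Unknown
+ * message types are dumped and -ENOTSUPP is returned.
+ */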
+int rndis_msg_parser (u8 configNr, u8 *buf)
+{
+	u32 MsgType, MsgLength;
+	__le32 *tmp;
+	struct rndis_params		*params;
+	
+	if (!buf)
+		return -ENOMEM;
+	
+	tmp = (__le32 *) buf; 
+	MsgType   = le32_to_cpup(tmp++);
+	MsgLength = le32_to_cpup(tmp++);
+	
+	if (configNr >= RNDIS_MAX_CONFIGS)
+		return -ENOTSUPP;
+	params = &rndis_per_dev_params [configNr];
+	
+	/* For USB: responses may take up to 10 seconds */
+	switch (MsgType)
+	{
+	case REMOTE_NDIS_INITIALIZE_MSG:
+		DEBUG("%s: REMOTE_NDIS_INITIALIZE_MSG\n", 
+			__FUNCTION__ );
+		params->state = RNDIS_INITIALIZED;
+		return  rndis_init_response (configNr,
+					     (rndis_init_msg_type *) buf);
+		
+	case REMOTE_NDIS_HALT_MSG:
+		DEBUG("%s: REMOTE_NDIS_HALT_MSG\n",
+			__FUNCTION__ );
+		params->state = RNDIS_UNINITIALIZED;
+		if (params->dev) {
+			netif_carrier_off (params->dev);
+			netif_stop_queue (params->dev);
+		}
+		return 0;
+		
+	case REMOTE_NDIS_QUERY_MSG:
+		return rndis_query_response (configNr, 
+					     (rndis_query_msg_type *) buf);
+		
+	case REMOTE_NDIS_SET_MSG:
+		return rndis_set_response (configNr, 
+					   (rndis_set_msg_type *) buf);
+		
+	case REMOTE_NDIS_RESET_MSG:
+		DEBUG("%s: REMOTE_NDIS_RESET_MSG\n", 
+			__FUNCTION__ );
+		return rndis_reset_response (configNr,
+					     (rndis_reset_msg_type *) buf);
+
+	case REMOTE_NDIS_KEEPALIVE_MSG:
+		/* For USB: host does this every 5 seconds */
+#ifdef	VERBOSE
+		DEBUG("%s: REMOTE_NDIS_KEEPALIVE_MSG\n", 
+			__FUNCTION__ );
+#endif
+		return rndis_keepalive_response (configNr,
+						 (rndis_keepalive_msg_type *) 
+						 buf);
+		
+	default: 
+		/* At least Windows XP emits some undefined RNDIS messages.
+		 * In one case those messages seemed to relate to the host
+		 * suspending itself.
+		 */
+		printk (KERN_WARNING
+			"%s: unknown RNDIS message 0x%08X len %d\n", 
+			__FUNCTION__ , MsgType, MsgLength);
+		{
+			unsigned i;
+			for (i = 0; i < MsgLength; i += 16) {
+				DEBUG ("%03d: "
+					" %02x %02x %02x %02x"
+					" %02x %02x %02x %02x"
+					" %02x %02x %02x %02x"
+					" %02x %02x %02x %02x"
+					"\n",
+					i,
+					buf[i], buf [i+1],
+						buf[i+2], buf[i+3],
+					buf[i+4], buf [i+5],
+						buf[i+6], buf[i+7],
+					buf[i+8], buf [i+9],
+						buf[i+10], buf[i+11],
+					buf[i+12], buf [i+13],
+						buf[i+14], buf[i+15]);
+			}
+		}
+		break;
+	}
+	
+	return -ENOTSUPP;
+}
+
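+/* Hands out a free RNDIS configuration slot and records the gadget
+ * driver's control-ack callback; returns the configNr on success or
+ * -1 once all RNDIS_MAX_CONFIGS slots are in use.
+ */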
+int rndis_register (int (* rndis_control_ack) (struct net_device *))
+{
+	u8 i;
+	
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+		if (!rndis_per_dev_params [i].used) {
+			rndis_per_dev_params [i].used = 1;
+			rndis_per_dev_params [i].ack = rndis_control_ack;
+			DEBUG("%s: configNr = %d\n", __FUNCTION__, i);
+			return i;
+		}
+	}
+	DEBUG("failed\n");
+	
+	return -1;
+}
+
+void rndis_deregister (int configNr)
+{
+	DEBUG("%s: \n", __FUNCTION__ );
+	
+	if (configNr >= RNDIS_MAX_CONFIGS) return;
+	rndis_per_dev_params [configNr].used = 0;
+	
+	return;
+}
+
+int rndis_set_param_dev (u8 configNr, struct net_device *dev, 
+			 struct net_device_stats *stats)
+{
+	DEBUG("%s:\n", __FUNCTION__ );
+	if (!dev || !stats) return -1;
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+	
+	rndis_per_dev_params [configNr].dev = dev;
+	rndis_per_dev_params [configNr].stats = stats;
+	
+	return 0;
+}
+
+int rndis_set_param_vendor (u8 configNr, u32 vendorID, const char *vendorDescr)
+{
+	DEBUG("%s:\n", __FUNCTION__ );
+	if (!vendorDescr) return -1;
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+	
+	rndis_per_dev_params [configNr].vendorID = vendorID;
+	rndis_per_dev_params [configNr].vendorDescr = vendorDescr;
+	
+	return 0;
+}
+
+int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed)
+{
+	DEBUG("%s:\n", __FUNCTION__ );
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+	
+	rndis_per_dev_params [configNr].medium = medium;
+	rndis_per_dev_params [configNr].speed = speed;
+	
+	return 0;
+}
+
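+/* Prepends the 44 byte RNDIS data packet header (struct
+ * rndis_packet_msg_type) to an outgoing Ethernet frame.  MessageType 1
+ * is REMOTE_NDIS_PACKET_MSG; DataOffset is counted from the DataOffset
+ * field itself, so 36 means the payload starts right after this header
+ * (8 + 36 == 44).
+ */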
+void rndis_add_hdr (struct sk_buff *skb)
+{
+	struct rndis_packet_msg_type	*header;
+
+	if (!skb)
+		return;
+	header = (void *) skb_push (skb, sizeof *header);
+	memset (header, 0, sizeof *header);
+	header->MessageType = __constant_cpu_to_le32 (1);
+	header->MessageLength = cpu_to_le32(skb->len);
+	header->DataOffset = __constant_cpu_to_le32 (36);
+	header->OOBDataOffset = cpu_to_le32(skb->len - 44);
+}
+
+void rndis_free_response (int configNr, u8 *buf)
+{
+	rndis_resp_t		*r;
+	struct list_head	*act, *tmp;
+	
+	list_for_each_safe (act, tmp, 
+			    &(rndis_per_dev_params [configNr].resp_queue))
+	{
+		r = list_entry (act, rndis_resp_t, list);
+		if (r && r->buf == buf) {
+			list_del (&r->list);
+			kfree (r);
+		}
+	}
+}
+
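+/* Completion messages sit on the per-configuration resp_queue.  The
+ * gadget driver learns of a new entry through the registered ack()
+ * callback, fetches it with rndis_get_next_response() (which marks it
+ * as sent), and hands the buffer back via rndis_free_response() once
+ * the host has collected it.
+ */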
+u8 *rndis_get_next_response (int configNr, u32 *length)
+{
+	rndis_resp_t		*r;
+	struct list_head 	*act, *tmp;
+	
+	if (!length) return NULL;
+	
+	list_for_each_safe (act, tmp, 
+			    &(rndis_per_dev_params [configNr].resp_queue))
+	{
+		r = list_entry (act, rndis_resp_t, list);
+		if (!r->send) {
+			r->send = 1;
+			*length = r->length;
+			return r->buf;
+		}
+	}
+	
+	return NULL;
+}
+
+static rndis_resp_t *rndis_add_response (int configNr, u32 length)
+{
+	rndis_resp_t	*r;
+	
+	r = kmalloc (sizeof (rndis_resp_t) + length, GFP_ATOMIC);
+	if (!r) return NULL;
+	
+	r->buf = (u8 *) (r + 1);
+	r->length = length;
+	r->send = 0;
+	
+	list_add_tail (&r->list, 
+		       &(rndis_per_dev_params [configNr].resp_queue));
+	return r;
+}
+
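+/* Strips the RNDIS data packet header from a received buffer in place:
+ * checks MessageType (1 == REMOTE_NDIS_PACKET_MSG), reads MessageLength
+ * and DataOffset (plus 8, the offset being relative to the DataOffset
+ * field), then shifts the Ethernet payload to the start of the buffer
+ * and shrinks *length to match.
+ */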
+int rndis_rm_hdr (u8 *buf, u32 *length)
+{
+	u32 i, messageLen, dataOffset;
+	__le32 *tmp;
+	
+	tmp = (__le32 *) buf; 
+
+	if (!buf || !length) return -1;
+	if (le32_to_cpup(tmp++) != 1) return -1;
+	
+	messageLen = le32_to_cpup(tmp++);
+	dataOffset = le32_to_cpup(tmp++) + 8;
+
+	if (messageLen < dataOffset || messageLen > *length) return -1;
+	
+	for (i = dataOffset; i < messageLen; i++)
+		buf [i - dataOffset] = buf [i];
+		
+	*length = messageLen - dataOffset;
+	
+	return 0;
+}
+
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+
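+/* /proc/driver/rndis-NNN debug files:  reading one dumps the state of
+ * that configuration; writing accepts a decimal link speed plus the
+ * single character commands 'C'/'c' (signal connect) and 'D'/'d'
+ * (signal disconnect).
+ */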
+static int rndis_proc_read (char *page, char **start, off_t off, int count, int *eof, 
+		     void *data)
+{
+	char *out = page;
+	int len;
+	rndis_params *param = (rndis_params *) data;
+	
+	out += snprintf (out, count, 
+			 "Config Nr. %d\n"
+			 "used      : %s\n"
+			 "state     : %s\n"
+			 "medium    : 0x%08X\n"
+			 "speed     : %d\n"
+			 "cable     : %s\n"
+			 "vendor ID : 0x%08X\n"
+			 "vendor    : %s\n", 
+			 param->confignr, (param->used) ? "y" : "n", 
+			 ({ char *s = "?";
+			 switch (param->state) {
+			 case RNDIS_UNINITIALIZED:
+				s = "RNDIS_UNINITIALIZED"; break;
+			 case RNDIS_INITIALIZED:
+				s = "RNDIS_INITIALIZED"; break;
+			 case RNDIS_DATA_INITIALIZED:
+				s = "RNDIS_DATA_INITIALIZED"; break;
+			}; s; }),
+			 param->medium, 
+			 (param->media_state) ? 0 : param->speed*100, 
+			 (param->media_state) ? "disconnected" : "connected",
+			 param->vendorID, param->vendorDescr);      
+	
+	len = out - page;
+	len -= off;
+	
+	if (len < count) {
+		*eof = 1;
+		if (len <= 0)
+			return 0;
+	} else
+		len = count;
+	
+	*start = page + off;
+	return len;
+}
+
+static int rndis_proc_write (struct file *file, const char __user *buffer, 
+		      unsigned long count, void *data)
+{
+	rndis_params *p = data;
+	u32 speed = 0;
+	int i, fl_speed = 0;
+	
+	for (i = 0; i < count; i++) {
+		char c;
+		if (get_user(c, buffer))
+			return -EFAULT;
+		switch (c) {
+		case '0':
+		case '1':
+		case '2':
+		case '3':
+		case '4':
+		case '5':
+		case '6':
+		case '7':
+		case '8':
+		case '9':
+			fl_speed = 1;
+			speed = speed*10 + c - '0';
+			break;
+		case 'C':
+		case 'c':
+			rndis_signal_connect (p->confignr);
+			break;
+		case 'D':
+		case 'd':
+			rndis_signal_disconnect(p->confignr);
+			break;
+		default: 
+			if (fl_speed) p->speed = speed;
+			else DEBUG ("%c is not valid\n", c);
+			break;
+		}
+		
+		buffer++;
+	}
+	
+	return count;
+}
+
+#define	NAME_TEMPLATE	"driver/rndis-%03d"
+
+static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
+
+#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
+
+
+int __init rndis_init (void)
+{
+	u8 i;
+
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+		char name [20];
+
+		sprintf (name, NAME_TEMPLATE, i);
+		if (!(rndis_connect_state [i]
+				= create_proc_entry (name, 0660, NULL))) 
+		{
+			DEBUG ("%s: remove entries", __FUNCTION__);
+			while (i) {
+				sprintf (name, NAME_TEMPLATE, --i);
+				remove_proc_entry (name, NULL);
+			}
+			DEBUG ("\n");
+			return -EIO;
+		}
+
+		rndis_connect_state [i]->nlink = 1;
+		rndis_connect_state [i]->write_proc = rndis_proc_write;
+		rndis_connect_state [i]->read_proc = rndis_proc_read;
+		rndis_connect_state [i]->data = (void *)
+				(rndis_per_dev_params + i);
+#endif
+		rndis_per_dev_params [i].confignr = i;
+		rndis_per_dev_params [i].used = 0;
+		rndis_per_dev_params [i].state = RNDIS_UNINITIALIZED;
+		rndis_per_dev_params [i].media_state
+				= NDIS_MEDIA_STATE_DISCONNECTED;
+		INIT_LIST_HEAD (&(rndis_per_dev_params [i].resp_queue));
+	}
+	
+	return 0;
+}
+
+void rndis_exit (void)
+{
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+	u8 i;
+	char name [20];
+	
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+		sprintf (name, NAME_TEMPLATE, i);
+		remove_proc_entry (name, NULL);
+	}
+#endif
+}
+
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
new file mode 100644
index 0000000..8225018
--- /dev/null
+++ b/drivers/usb/gadget/rndis.h
@@ -0,0 +1,348 @@
+/* 
+ * RNDIS	Definitions for Remote NDIS
+ * 
+ * Version:	$Id: rndis.h,v 1.15 2004/03/25 21:33:46 robert Exp $
+ * 
+ * Authors:	Benedikt Spranger, Pengutronix
+ * 		Robert Schwebel, Pengutronix
+ * 
+ * 		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		version 2, as published by the Free Software Foundation. 
+ * 
+ *		This software was originally developed in conformance with
+ *		Microsoft's Remote NDIS Specification License Agreement.
+ */
+
+#ifndef _LINUX_RNDIS_H
+#define _LINUX_RNDIS_H
+
+#include "ndis.h"
+
+#define RNDIS_MAXIMUM_FRAME_SIZE	1518
+#define RNDIS_MAX_TOTAL_SIZE		1558
+
+/* Remote NDIS Versions */
+#define RNDIS_MAJOR_VERSION		1
+#define RNDIS_MINOR_VERSION		0
+
+/* Status Values */
+#define RNDIS_STATUS_SUCCESS		0x00000000U	/* Success           */
+#define RNDIS_STATUS_FAILURE		0xC0000001U	/* Unspecified error */
+#define RNDIS_STATUS_INVALID_DATA	0xC0010015U	/* Invalid data      */
+#define RNDIS_STATUS_NOT_SUPPORTED	0xC00000BBU	/* Unsupported request */
+#define RNDIS_STATUS_MEDIA_CONNECT	0x4001000BU	/* Device connected  */
+#define RNDIS_STATUS_MEDIA_DISCONNECT	0x4001000CU	/* Device disconnected */
+/* For all not specified status messages:
+ * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx 
+ */
+
+/* Message Set for Connectionless (802.3) Devices */
+#define REMOTE_NDIS_INITIALIZE_MSG	0x00000002U	/* Initialize device */
+#define REMOTE_NDIS_HALT_MSG		0x00000003U
+#define REMOTE_NDIS_QUERY_MSG		0x00000004U
+#define REMOTE_NDIS_SET_MSG		0x00000005U
+#define REMOTE_NDIS_RESET_MSG		0x00000006U
+#define REMOTE_NDIS_INDICATE_STATUS_MSG	0x00000007U
+#define REMOTE_NDIS_KEEPALIVE_MSG	0x00000008U
+
+/* Message completion */
+#define REMOTE_NDIS_INITIALIZE_CMPLT	0x80000002U
+#define REMOTE_NDIS_QUERY_CMPLT		0x80000004U
+#define REMOTE_NDIS_SET_CMPLT		0x80000005U
+#define REMOTE_NDIS_RESET_CMPLT		0x80000006U
+#define REMOTE_NDIS_KEEPALIVE_CMPLT	0x80000008U
+
+/* Device Flags */
+#define RNDIS_DF_CONNECTIONLESS		0x00000001U
+#define RNDIS_DF_CONNECTION_ORIENTED	0x00000002U
+
+#define RNDIS_MEDIUM_802_3		0x00000000U
+
+/* from drivers/net/sk98lin/h/skgepnmi.h */
+#define OID_PNP_CAPABILITIES			0xFD010100
+#define OID_PNP_SET_POWER			0xFD010101
+#define OID_PNP_QUERY_POWER			0xFD010102
+#define OID_PNP_ADD_WAKE_UP_PATTERN		0xFD010103
+#define OID_PNP_REMOVE_WAKE_UP_PATTERN		0xFD010104
+#define OID_PNP_ENABLE_WAKE_UP			0xFD010106
+
+
+/* supported OIDs */
+static const u32 oid_supported_list [] = 
+{
+	/* the general stuff */
+	OID_GEN_SUPPORTED_LIST,
+	OID_GEN_HARDWARE_STATUS,
+	OID_GEN_MEDIA_SUPPORTED,
+	OID_GEN_MEDIA_IN_USE,
+	OID_GEN_MAXIMUM_FRAME_SIZE,
+	OID_GEN_LINK_SPEED,
+	OID_GEN_TRANSMIT_BLOCK_SIZE,
+	OID_GEN_RECEIVE_BLOCK_SIZE,
+	OID_GEN_VENDOR_ID,
+	OID_GEN_VENDOR_DESCRIPTION,
+	OID_GEN_VENDOR_DRIVER_VERSION,
+	OID_GEN_CURRENT_PACKET_FILTER,
+	OID_GEN_MAXIMUM_TOTAL_SIZE,
+	OID_GEN_MEDIA_CONNECT_STATUS,
+	OID_GEN_PHYSICAL_MEDIUM,
+#if 0
+	OID_GEN_RNDIS_CONFIG_PARAMETER,
+#endif
+	
+	/* the statistical stuff */
+	OID_GEN_XMIT_OK,
+	OID_GEN_RCV_OK,
+	OID_GEN_XMIT_ERROR,
+	OID_GEN_RCV_ERROR,
+	OID_GEN_RCV_NO_BUFFER,
+#ifdef	RNDIS_OPTIONAL_STATS
+	OID_GEN_DIRECTED_BYTES_XMIT,
+	OID_GEN_DIRECTED_FRAMES_XMIT,
+	OID_GEN_MULTICAST_BYTES_XMIT,
+	OID_GEN_MULTICAST_FRAMES_XMIT,
+	OID_GEN_BROADCAST_BYTES_XMIT,
+	OID_GEN_BROADCAST_FRAMES_XMIT,
+	OID_GEN_DIRECTED_BYTES_RCV,
+	OID_GEN_DIRECTED_FRAMES_RCV,
+	OID_GEN_MULTICAST_BYTES_RCV,
+	OID_GEN_MULTICAST_FRAMES_RCV,
+	OID_GEN_BROADCAST_BYTES_RCV,
+	OID_GEN_BROADCAST_FRAMES_RCV,
+	OID_GEN_RCV_CRC_ERROR,
+	OID_GEN_TRANSMIT_QUEUE_LENGTH,
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+    	/* mandatory 802.3 */
+	/* the general stuff */
+	OID_802_3_PERMANENT_ADDRESS,
+	OID_802_3_CURRENT_ADDRESS,
+	OID_802_3_MULTICAST_LIST,
+	OID_802_3_MAC_OPTIONS,
+	OID_802_3_MAXIMUM_LIST_SIZE,
+	
+	/* the statistical stuff */
+	OID_802_3_RCV_ERROR_ALIGNMENT,
+	OID_802_3_XMIT_ONE_COLLISION,
+	OID_802_3_XMIT_MORE_COLLISIONS,
+#ifdef	RNDIS_OPTIONAL_STATS
+	OID_802_3_XMIT_DEFERRED,
+	OID_802_3_XMIT_MAX_COLLISIONS,
+	OID_802_3_RCV_OVERRUN,
+	OID_802_3_XMIT_UNDERRUN,
+	OID_802_3_XMIT_HEARTBEAT_FAILURE,
+	OID_802_3_XMIT_TIMES_CRS_LOST,
+	OID_802_3_XMIT_LATE_COLLISIONS,
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+#ifdef	RNDIS_PM
+	/* PM and wakeup are mandatory for USB: */
+
+	/* power management */
+	OID_PNP_CAPABILITIES,
+	OID_PNP_QUERY_POWER,
+	OID_PNP_SET_POWER,
+
+	/* wake up host */
+	OID_PNP_ENABLE_WAKE_UP,
+	OID_PNP_ADD_WAKE_UP_PATTERN,
+	OID_PNP_REMOVE_WAKE_UP_PATTERN,
+#endif
+};
+
+
+typedef struct rndis_init_msg_type 
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	MajorVersion;
+	__le32	MinorVersion;
+	__le32	MaxTransferSize;
+} rndis_init_msg_type;
+
+typedef struct rndis_init_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+	__le32	MajorVersion;
+	__le32	MinorVersion;
+	__le32	DeviceFlags;
+	__le32	Medium;
+	__le32	MaxPacketsPerTransfer;
+	__le32	MaxTransferSize;
+	__le32	PacketAlignmentFactor;
+	__le32	AFListOffset;
+	__le32	AFListSize;
+} rndis_init_cmplt_type;
+
+typedef struct rndis_halt_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+} rndis_halt_msg_type;
+
+typedef struct rndis_query_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	OID;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+	__le32	DeviceVcHandle;
+} rndis_query_msg_type;
+
+typedef struct rndis_query_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+} rndis_query_cmplt_type;
+
+typedef struct rndis_set_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	OID;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+	__le32	DeviceVcHandle;
+} rndis_set_msg_type;
+
+typedef struct rndis_set_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+} rndis_set_cmplt_type;
+
+typedef struct rndis_reset_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Reserved;
+} rndis_reset_msg_type;
+
+typedef struct rndis_reset_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Status;
+	__le32	AddressingReset;
+} rndis_reset_cmplt_type;
+
+typedef struct rndis_indicate_status_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Status;
+	__le32	StatusBufferLength;
+	__le32	StatusBufferOffset;
+} rndis_indicate_status_msg_type;
+
+typedef struct rndis_keepalive_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+} rndis_keepalive_msg_type;
+
+typedef struct rndis_keepalive_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+} rndis_keepalive_cmplt_type;
+
+struct rndis_packet_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	DataOffset;
+	__le32	DataLength;
+	__le32	OOBDataOffset;
+	__le32	OOBDataLength;
+	__le32	NumOOBDataElements;
+	__le32	PerPacketInfoOffset;
+	__le32	PerPacketInfoLength;
+	__le32	VcHandle;
+	__le32	Reserved;
+};
+
+struct rndis_config_parameter
+{
+	__le32	ParameterNameOffset;
+	__le32	ParameterNameLength;
+	__le32	ParameterType;
+	__le32	ParameterValueOffset;
+	__le32	ParameterValueLength;
+};
+
+/* implementation specific */
+enum rndis_state
+{
+	RNDIS_UNINITIALIZED,
+	RNDIS_INITIALIZED,
+	RNDIS_DATA_INITIALIZED,
+};
+
+typedef struct rndis_resp_t
+{
+	struct list_head	list;
+	u8			*buf;
+	u32			length;
+	int			send;
+} rndis_resp_t;
+
+typedef struct rndis_params
+{
+	u8			confignr;
+	int			used;
+	enum rndis_state	state;
+	u32			filter;
+	u32			medium;
+	u32			speed;
+	u32			media_state;
+	const u8		*host_mac;
+	struct net_device 	*dev;
+	struct net_device_stats *stats;
+	u32			vendorID;
+	const char		*vendorDescr;
+	int 			(*ack) (struct net_device *);
+	struct list_head	resp_queue;
+} rndis_params;
+
+/* RNDIS Message parser and other useless functions */
+int  rndis_msg_parser (u8 configNr, u8 *buf);
+int  rndis_register (int (*rndis_control_ack) (struct net_device *));
+void rndis_deregister (int configNr);
+int  rndis_set_param_dev (u8 configNr, struct net_device *dev,
+			 struct net_device_stats *stats);
+int  rndis_set_param_vendor (u8 configNr, u32 vendorID, 
+			    const char *vendorDescr);
+int  rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
+void rndis_add_hdr (struct sk_buff *skb);
+int  rndis_rm_hdr (u8 *buf, u32 *length);
+u8   *rndis_get_next_response (int configNr, u32 *length);
+void rndis_free_response (int configNr, u8 *buf);
+
+int  rndis_signal_connect (int configNr);
+int  rndis_signal_disconnect (int configNr);
+int  rndis_state (int configNr);
+extern void rndis_set_host_mac (int configNr, const u8 *addr);
+
+int __init rndis_init (void);
+void rndis_exit (void);
+
+#endif  /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
new file mode 100644
index 0000000..2af3f78
--- /dev/null
+++ b/drivers/usb/gadget/serial.c
@@ -0,0 +1,2436 @@
+/*
+ * g_serial.c -- USB gadget serial driver
+ *
+ * Copyright 2003 (C) Al Borchers (alborchers@steinerpoint.com)
+ *
+ * This code is based in part on the Gadget Zero driver, which
+ * is Copyright (C) 2003 by David Brownell, all rights reserved.
+ *
+ * This code also borrows from usbserial.c, which is
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/wait.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_cdc.h>
+#include <linux/usb_gadget.h>
+
+#include "gadget_chips.h"
+
+
+/* Wait Cond */
+
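+/* These behave like wait_event_interruptible(), except the caller holds
+ * a spinlock:  the lock is dropped (restoring IRQ flags) around each
+ * schedule() and reacquired before the condition is rechecked.  The
+ * _timeout variants additionally give up after "timeout" jiffies.
+ */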
+#define __wait_cond_interruptible(wq, condition, lock, flags, ret)	\
+do {									\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			spin_unlock_irqrestore(lock, flags);		\
+			schedule();					\
+			spin_lock_irqsave(lock, flags);			\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+	
+#define wait_cond_interruptible(wq, condition, lock, flags)		\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__wait_cond_interruptible(wq, condition, lock, flags,	\
+						__ret);			\
+	__ret;								\
+})
+
+#define __wait_cond_interruptible_timeout(wq, condition, lock, flags, 	\
+						timeout, ret)		\
+do {									\
+	signed long __timeout = timeout;				\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (__timeout == 0)					\
+			break;						\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			spin_unlock_irqrestore(lock, flags);		\
+			__timeout = schedule_timeout(__timeout);	\
+			spin_lock_irqsave(lock, flags);			\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+	
+#define wait_cond_interruptible_timeout(wq, condition, lock, flags,	\
+						timeout)		\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__wait_cond_interruptible_timeout(wq, condition, lock,	\
+						flags, timeout, __ret);	\
+	__ret;								\
+})
+
+
+/* Defines */
+
+#define GS_VERSION_STR			"v2.0"
+#define GS_VERSION_NUM			0x0200
+
+#define GS_LONG_NAME			"Gadget Serial"
+#define GS_SHORT_NAME			"g_serial"
+
+#define GS_MAJOR			127
+#define GS_MINOR_START			0
+
+#define GS_NUM_PORTS			16
+
+#define GS_NUM_CONFIGS			1
+#define GS_NO_CONFIG_ID			0
+#define GS_BULK_CONFIG_ID		1
+#define GS_ACM_CONFIG_ID		2
+
+#define GS_MAX_NUM_INTERFACES		2
+#define GS_BULK_INTERFACE_ID		0
+#define GS_CONTROL_INTERFACE_ID		0
+#define GS_DATA_INTERFACE_ID		1
+
+#define GS_MAX_DESC_LEN			256
+
+#define GS_DEFAULT_READ_Q_SIZE		32
+#define GS_DEFAULT_WRITE_Q_SIZE		32
+
+#define GS_DEFAULT_WRITE_BUF_SIZE	8192
+#define GS_TMP_BUF_SIZE			8192
+
+#define GS_CLOSE_TIMEOUT		15
+
+#define GS_DEFAULT_USE_ACM		0
+
+#define GS_DEFAULT_DTE_RATE		9600
+#define GS_DEFAULT_DATA_BITS		8
+#define GS_DEFAULT_PARITY		USB_CDC_NO_PARITY
+#define GS_DEFAULT_CHAR_FORMAT		USB_CDC_1_STOP_BITS
+
+/* select highspeed/fullspeed, hiding highspeed if not configured */
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+#define GS_SPEED_SELECT(is_hs,hs,fs) ((is_hs) ? (hs) : (fs))
+#else
+#define GS_SPEED_SELECT(is_hs,hs,fs) (fs)
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+
+/* debug settings */
+#ifdef GS_DEBUG
+static int debug = 1;
+
+#define gs_debug(format, arg...) \
+	do { if (debug) printk(KERN_DEBUG format, ## arg); } while(0)
+#define gs_debug_level(level, format, arg...) \
+	do { if (debug>=level) printk(KERN_DEBUG format, ## arg); } while(0)
+
+#else
+
+#define gs_debug(format, arg...) \
+	do { } while(0)
+#define gs_debug_level(level, format, arg...) \
+	do { } while(0)
+
+#endif /* GS_DEBUG */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#define GS_VENDOR_ID			0x0525	/* NetChip */
+#define GS_PRODUCT_ID			0xa4a6	/* Linux-USB Serial Gadget */
+#define GS_CDC_PRODUCT_ID		0xa4a7	/* ... as CDC-ACM */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		8
+
+
+/* Structures */
+
+struct gs_dev;
+
+/* circular buffer */
+struct gs_buf {
+	unsigned int		buf_size;
+	char			*buf_buf;
+	char			*buf_get;
+	char			*buf_put;
+};
+
+/* list of requests */
+struct gs_req_entry {
+	struct list_head	re_entry;
+	struct usb_request	*re_req;
+};
+
+/* the port structure holds info for each port, one for each minor number */
+struct gs_port {
+	struct gs_dev 		*port_dev;	/* pointer to device struct */
+	struct tty_struct	*port_tty;	/* pointer to tty struct */
+	spinlock_t		port_lock;
+	int 			port_num;
+	int			port_open_count;
+	int			port_in_use;	/* open/close in progress */
+	wait_queue_head_t	port_write_wait;/* waiting to write */
+	struct gs_buf		*port_write_buf;
+	struct usb_cdc_line_coding	port_line_coding;
+};
+
+/* the device structure holds info for the USB device */
+struct gs_dev {
+	struct usb_gadget	*dev_gadget;	/* gadget device pointer */
+	spinlock_t		dev_lock;	/* lock for set/reset config */
+	int			dev_config;	/* configuration number */
+	struct usb_ep		*dev_notify_ep;	/* address of notify endpoint */
+	struct usb_ep		*dev_in_ep;	/* address of in endpoint */
+	struct usb_ep		*dev_out_ep;	/* address of out endpoint */
+	struct usb_endpoint_descriptor		/* descriptor of notify ep */
+				*dev_notify_ep_desc;
+	struct usb_endpoint_descriptor		/* descriptor of in endpoint */
+				*dev_in_ep_desc;
+	struct usb_endpoint_descriptor		/* descriptor of out endpoint */
+				*dev_out_ep_desc;
+	struct usb_request	*dev_ctrl_req;	/* control request */
+	struct list_head	dev_req_list;	/* list of write requests */
+	int			dev_sched_port;	/* round robin port scheduled */
+	struct gs_port		*dev_port[GS_NUM_PORTS]; /* the ports */
+};
+
+
+/* Functions */
+
+/* module */
+static int __init gs_module_init(void);
+static void __exit gs_module_exit(void);
+
+/* tty driver */
+static int gs_open(struct tty_struct *tty, struct file *file);
+static void gs_close(struct tty_struct *tty, struct file *file);
+static int gs_write(struct tty_struct *tty, 
+	const unsigned char *buf, int count);
+static void gs_put_char(struct tty_struct *tty, unsigned char ch);
+static void gs_flush_chars(struct tty_struct *tty);
+static int gs_write_room(struct tty_struct *tty);
+static int gs_chars_in_buffer(struct tty_struct *tty);
+static void gs_throttle(struct tty_struct * tty);
+static void gs_unthrottle(struct tty_struct * tty);
+static void gs_break(struct tty_struct *tty, int break_state);
+static int  gs_ioctl(struct tty_struct *tty, struct file *file,
+	unsigned int cmd, unsigned long arg);
+static void gs_set_termios(struct tty_struct *tty, struct termios *old);
+
+static int gs_send(struct gs_dev *dev);
+static int gs_send_packet(struct gs_dev *dev, char *packet,
+	unsigned int size);
+static int gs_recv_packet(struct gs_dev *dev, char *packet,
+	unsigned int size);
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req);
+static void gs_write_complete(struct usb_ep *ep, struct usb_request *req);
+
+/* gadget driver */
+static int gs_bind(struct usb_gadget *gadget);
+static void gs_unbind(struct usb_gadget *gadget);
+static int gs_setup(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl);
+static int gs_setup_standard(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl);
+static int gs_setup_class(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl);
+static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req);
+static void gs_disconnect(struct usb_gadget *gadget);
+static int gs_set_config(struct gs_dev *dev, unsigned config);
+static void gs_reset_config(struct gs_dev *dev);
+static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
+		u8 type, unsigned int index, int is_otg);
+
+static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
+	int kmalloc_flags);
+static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
+
+static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
+	int kmalloc_flags);
+static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
+
+static int gs_alloc_ports(struct gs_dev *dev, int kmalloc_flags);
+static void gs_free_ports(struct gs_dev *dev);
+
+/* circular buffer */
+static struct gs_buf *gs_buf_alloc(unsigned int size, int kmalloc_flags);
+static void gs_buf_free(struct gs_buf *gb);
+static void gs_buf_clear(struct gs_buf *gb);
+static unsigned int gs_buf_data_avail(struct gs_buf *gb);
+static unsigned int gs_buf_space_avail(struct gs_buf *gb);
+static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf,
+	unsigned int count);
+static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
+	unsigned int count);
+
+/* external functions */
+extern int net2280_set_fifo_mode(struct usb_gadget *gadget, int mode);
+
+
+/* Globals */
+
+static struct gs_dev *gs_device;
+
+static const char *EP_IN_NAME;
+static const char *EP_OUT_NAME;
+static const char *EP_NOTIFY_NAME;
+
+static struct semaphore	gs_open_close_sem[GS_NUM_PORTS];
+
+static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
+static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
+
+static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
+
+static unsigned int use_acm = GS_DEFAULT_USE_ACM;
+
+
+/* tty driver struct */
+static struct tty_operations gs_tty_ops = {
+	.open =			gs_open,
+	.close =		gs_close,
+	.write =		gs_write,
+	.put_char =		gs_put_char,
+	.flush_chars =		gs_flush_chars,
+	.write_room =		gs_write_room,
+	.ioctl =		gs_ioctl,
+	.set_termios =		gs_set_termios,
+	.throttle =		gs_throttle,
+	.unthrottle =		gs_unthrottle,
+	.break_ctl =		gs_break,
+	.chars_in_buffer =	gs_chars_in_buffer,
+};
+static struct tty_driver *gs_tty_driver;
+
+/* gadget driver struct */
+static struct usb_gadget_driver gs_gadget_driver = {
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	.speed =		USB_SPEED_HIGH,
+#else
+	.speed =		USB_SPEED_FULL,
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+	.function =		GS_LONG_NAME,
+	.bind =			gs_bind,
+	.unbind =		gs_unbind,
+	.setup =		gs_setup,
+	.disconnect =		gs_disconnect,
+	.driver = {
+		.name =		GS_SHORT_NAME,
+		/* .shutdown = ... */
+		/* .suspend = ...  */
+		/* .resume = ...   */
+	},
+};
+
+
+/* USB descriptors */
+
+#define GS_MANUFACTURER_STR_ID	1
+#define GS_PRODUCT_STR_ID	2
+#define GS_SERIAL_STR_ID	3
+#define GS_BULK_CONFIG_STR_ID	4
+#define GS_ACM_CONFIG_STR_ID	5
+#define GS_CONTROL_STR_ID	6
+#define GS_DATA_STR_ID		7
+
+/* static strings, in UTF-8 */
+static char manufacturer[50];
+static struct usb_string gs_strings[] = {
+	{ GS_MANUFACTURER_STR_ID, manufacturer },
+	{ GS_PRODUCT_STR_ID, GS_LONG_NAME },
+	{ GS_SERIAL_STR_ID, "0" },
+	{ GS_BULK_CONFIG_STR_ID, "Gadget Serial Bulk" },
+	{ GS_ACM_CONFIG_STR_ID, "Gadget Serial CDC ACM" },
+	{ GS_CONTROL_STR_ID, "Gadget Serial Control" },
+	{ GS_DATA_STR_ID, "Gadget Serial Data" },
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings gs_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		gs_strings,
+};
+
+static struct usb_device_descriptor gs_device_desc = {
+	.bLength =		USB_DT_DEVICE_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE,
+	.bcdUSB =		__constant_cpu_to_le16(0x0200),
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	.idVendor =		__constant_cpu_to_le16(GS_VENDOR_ID),
+	.idProduct =		__constant_cpu_to_le16(GS_PRODUCT_ID),
+	.iManufacturer =	GS_MANUFACTURER_STR_ID,
+	.iProduct =		GS_PRODUCT_STR_ID,
+	.iSerialNumber =	GS_SERIAL_STR_ID,
+	.bNumConfigurations =	GS_NUM_CONFIGS,
+};
+
+static struct usb_otg_descriptor gs_otg_descriptor = {
+	.bLength =		sizeof(gs_otg_descriptor),
+	.bDescriptorType =	USB_DT_OTG,
+	.bmAttributes =		USB_OTG_SRP,
+};
+
+static struct usb_config_descriptor gs_bulk_config_desc = {
+	.bLength =		USB_DT_CONFIG_SIZE,
+	.bDescriptorType =	USB_DT_CONFIG,
+	/* .wTotalLength computed dynamically */
+	.bNumInterfaces =	1,
+	.bConfigurationValue =	GS_BULK_CONFIG_ID,
+	.iConfiguration =	GS_BULK_CONFIG_STR_ID,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		1,
+};
+
+static struct usb_config_descriptor gs_acm_config_desc = {
+	.bLength =		USB_DT_CONFIG_SIZE,
+	.bDescriptorType =	USB_DT_CONFIG,
+	/* .wTotalLength computed dynamically */
+	.bNumInterfaces =	2,
+	.bConfigurationValue =	GS_ACM_CONFIG_ID,
+	.iConfiguration =	GS_ACM_CONFIG_STR_ID,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		1,
+};
+
+static const struct usb_interface_descriptor gs_bulk_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bInterfaceNumber =	GS_BULK_INTERFACE_ID,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	.iInterface =		GS_DATA_STR_ID,
+};
+
+static const struct usb_interface_descriptor gs_control_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bInterfaceNumber =	GS_CONTROL_INTERFACE_ID,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	.iInterface =		GS_CONTROL_STR_ID,
+};
+
+static const struct usb_interface_descriptor gs_data_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bInterfaceNumber =	GS_DATA_INTERFACE_ID,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	.iInterface =		GS_DATA_STR_ID,
+};
+
+static const struct usb_cdc_header_desc gs_header_desc = {
+	.bLength =		sizeof(gs_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		__constant_cpu_to_le16(0x0110),
+};
+
+static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = {
+	.bLength =  		sizeof(gs_call_mgmt_descriptor),
+	.bDescriptorType = 	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = 	USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities = 	0,
+	.bDataInterface = 	1,	/* index of data interface */
+};
+
+static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
+	.bLength =  		sizeof(gs_acm_descriptor),
+	.bDescriptorType = 	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = 	USB_CDC_ACM_TYPE,
+	.bmCapabilities = 	0,
+};
+
+static const struct usb_cdc_union_desc gs_union_desc = {
+	.bLength =		sizeof(gs_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	.bMasterInterface0 =	0,	/* index of control interface */
+	.bSlaveInterface0 =	1,	/* index of data interface */
+};
+
+static struct usb_endpoint_descriptor gs_fullspeed_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor gs_fullspeed_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor gs_fullspeed_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static const struct usb_descriptor_header *gs_bulk_fullspeed_function[] = {
+	(struct usb_descriptor_header *) &gs_otg_descriptor,
+	(struct usb_descriptor_header *) &gs_bulk_interface_desc,
+	(struct usb_descriptor_header *) &gs_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &gs_fullspeed_out_desc,
+	NULL,
+};
+
+static const struct usb_descriptor_header *gs_acm_fullspeed_function[] = {
+	(struct usb_descriptor_header *) &gs_otg_descriptor,
+	(struct usb_descriptor_header *) &gs_control_interface_desc,
+	(struct usb_descriptor_header *) &gs_header_desc,
+	(struct usb_descriptor_header *) &gs_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gs_acm_descriptor,
+	(struct usb_descriptor_header *) &gs_union_desc,
+	(struct usb_descriptor_header *) &gs_fullspeed_notify_desc,
+	(struct usb_descriptor_header *) &gs_data_interface_desc,
+	(struct usb_descriptor_header *) &gs_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &gs_fullspeed_out_desc,
+	NULL,
+};
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+static struct usb_endpoint_descriptor gs_highspeed_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_endpoint_descriptor gs_highspeed_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor gs_highspeed_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_qualifier_descriptor gs_qualifier_desc = {
+	.bLength =		sizeof(struct usb_qualifier_descriptor),
+	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
+	.bcdUSB =		__constant_cpu_to_le16 (0x0200),
+	/* assumes ep0 uses the same value for both speeds ... */
+	.bNumConfigurations =	GS_NUM_CONFIGS,
+};
+
+static const struct usb_descriptor_header *gs_bulk_highspeed_function[] = {
+	(struct usb_descriptor_header *) &gs_otg_descriptor,
+	(struct usb_descriptor_header *) &gs_bulk_interface_desc,
+	(struct usb_descriptor_header *) &gs_highspeed_in_desc,
+	(struct usb_descriptor_header *) &gs_highspeed_out_desc,
+	NULL,
+};
+
+static const struct usb_descriptor_header *gs_acm_highspeed_function[] = {
+	(struct usb_descriptor_header *) &gs_otg_descriptor,
+	(struct usb_descriptor_header *) &gs_control_interface_desc,
+	(struct usb_descriptor_header *) &gs_header_desc,
+	(struct usb_descriptor_header *) &gs_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gs_acm_descriptor,
+	(struct usb_descriptor_header *) &gs_union_desc,
+	(struct usb_descriptor_header *) &gs_highspeed_notify_desc,
+	(struct usb_descriptor_header *) &gs_data_interface_desc,
+	(struct usb_descriptor_header *) &gs_highspeed_in_desc,
+	(struct usb_descriptor_header *) &gs_highspeed_out_desc,
+	NULL,
+};
+
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+
+
+/* Module */
+MODULE_DESCRIPTION(GS_LONG_NAME);
+MODULE_AUTHOR("Al Borchers");
+MODULE_LICENSE("GPL");
+
+#ifdef GS_DEBUG
+module_param(debug, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
+#endif
+
+module_param(read_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(read_q_size, "Read request queue size, default=32");
+
+module_param(write_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_q_size, "Write request queue size, default=32");
+
+module_param(write_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
+
+module_param(use_acm, uint, S_IRUGO);
+MODULE_PARM_DESC(use_acm, "Use CDC ACM, 0=no, 1=yes, default=no");
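+
+/*
+ * Illustrative example: assuming this driver is built as the usual
+ * g_serial module, the parameters above are given at load time, e.g.
+ *
+ *	modprobe g_serial use_acm=1 write_buf_size=16384
+ *
+ * The port then appears as an ordinary tty named after
+ * gs_tty_driver->name below (e.g. /dev/ttygs0), using GS_MAJOR and
+ * GS_MINOR_START for its device numbers.
+ */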
+
+module_init(gs_module_init);
+module_exit(gs_module_exit);
+
+/*
+*  gs_module_init
+*
+*  Register as a USB gadget driver and a tty driver.
+*/
+static int __init gs_module_init(void)
+{
+	int i;
+	int retval;
+
+	retval = usb_gadget_register_driver(&gs_gadget_driver);
+	if (retval) {
+		printk(KERN_ERR "gs_module_init: cannot register gadget driver, ret=%d\n", retval);
+		return retval;
+	}
+
+	gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
+	if (!gs_tty_driver) {
+		usb_gadget_unregister_driver(&gs_gadget_driver);
+		return -ENOMEM;
+	}
+	gs_tty_driver->owner = THIS_MODULE;
+	gs_tty_driver->driver_name = GS_SHORT_NAME;
+	gs_tty_driver->name = "ttygs";
+	gs_tty_driver->devfs_name = "usb/ttygs/";
+	gs_tty_driver->major = GS_MAJOR;
+	gs_tty_driver->minor_start = GS_MINOR_START;
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
+	gs_tty_driver->init_termios = tty_std_termios;
+	gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	for (i=0; i < GS_NUM_PORTS; i++)
+		sema_init(&gs_open_close_sem[i], 1);
+
+	retval = tty_register_driver(gs_tty_driver);
+	if (retval) {
+		usb_gadget_unregister_driver(&gs_gadget_driver);
+		put_tty_driver(gs_tty_driver);
+		printk(KERN_ERR "gs_module_init: cannot register tty driver, ret=%d\n", retval);
+		return retval;
+	}
+
+	printk(KERN_INFO "gs_module_init: %s %s loaded\n", GS_LONG_NAME, GS_VERSION_STR);
+	return 0;
+}
+
+/*
+* gs_module_exit
+*
+* Unregister as a tty driver and a USB gadget driver.
+*/
+static void __exit gs_module_exit(void)
+{
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	usb_gadget_unregister_driver(&gs_gadget_driver);
+
+	printk(KERN_INFO "gs_module_exit: %s %s unloaded\n", GS_LONG_NAME, GS_VERSION_STR);
+}
+
+/* TTY Driver */
+
+/*
+ * gs_open
+ */
+static int gs_open(struct tty_struct *tty, struct file *file)
+{
+	int port_num;
+	unsigned long flags;
+	struct gs_port *port;
+	struct gs_dev *dev;
+	struct gs_buf *buf;
+	struct semaphore *sem;
+	int ret;
+
+	port_num = tty->index;
+
+	gs_debug("gs_open: (%d,%p,%p)\n", port_num, tty, file);
+
+	if (port_num < 0 || port_num >= GS_NUM_PORTS) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) invalid port number\n",
+			port_num, tty, file);
+		return -ENODEV;
+	}
+
+	dev = gs_device;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL device pointer\n",
+			port_num, tty, file);
+		return -ENODEV;
+	}
+
+	sem = &gs_open_close_sem[port_num];
+	if (down_interruptible(sem)) {
+		printk(KERN_ERR
+		"gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
+			port_num, tty, file);
+		return -ERESTARTSYS;
+	}
+
+	spin_lock_irqsave(&dev->dev_lock, flags);
+
+	if (dev->dev_config == GS_NO_CONFIG_ID) {
+		printk(KERN_ERR
+			"gs_open: (%d,%p,%p) device is not connected\n",
+			port_num, tty, file);
+		ret = -ENODEV;
+		goto exit_unlock_dev;
+	}
+
+	port = dev->dev_port[port_num];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL port pointer\n",
+			port_num, tty, file);
+		ret = -ENODEV;
+		goto exit_unlock_dev;
+	}
+
+	spin_lock(&port->port_lock);
+	spin_unlock(&dev->dev_lock);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (1)\n",
+			port_num, tty, file);
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	if (port->port_open_count > 0) {
+		++port->port_open_count;
+		gs_debug("gs_open: (%d,%p,%p) already open\n",
+			port_num, tty, file);
+		ret = 0;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = NULL;
+
+	/* mark port as in use, we can drop port lock and sleep if necessary */
+	port->port_in_use = 1;
+
+	/* allocate write buffer on first open */
+	if (port->port_write_buf == NULL) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		buf = gs_buf_alloc(write_buf_size, GFP_KERNEL);
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		/* might have been disconnected while asleep, check */
+		if (port->port_dev == NULL) {
+			printk(KERN_ERR
+				"gs_open: (%d,%p,%p) port disconnected (2)\n",
+				port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -EIO;
+			goto exit_unlock_port;
+		}
+
+		if ((port->port_write_buf=buf) == NULL) {
+			printk(KERN_ERR "gs_open: (%d,%p,%p) cannot allocate port write buffer\n",
+				port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -ENOMEM;
+			goto exit_unlock_port;
+		}
+
+	}
+
+	/* wait for carrier detect (not implemented) */
+
+	/* might have been disconnected while asleep, check */
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (3)\n",
+			port_num, tty, file);
+		port->port_in_use = 0;
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = port;
+	port->port_tty = tty;
+	port->port_open_count = 1;
+	port->port_in_use = 0;
+
+	gs_debug("gs_open: (%d,%p,%p) completed\n", port_num, tty, file);
+
+	ret = 0;
+
+exit_unlock_port:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	up(sem);
+	return ret;
+
+exit_unlock_dev:
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+	up(sem);
+	return ret;
+
+}
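+
+/*
+ * Locking summary: opens and closes of a port are serialized by
+ * gs_open_close_sem[port_num].  dev->dev_lock guards the device state
+ * and port array; each port's port_lock guards that port.  gs_open
+ * takes dev_lock, then port_lock, then releases dev_lock, and drops
+ * port_lock around the GFP_KERNEL write buffer allocation, rechecking
+ * for disconnect afterwards.
+ */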
+
+/*
+ * gs_close
+ */
+static void gs_close(struct tty_struct *tty, struct file *file)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+	struct semaphore *sem;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_close: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
+
+	sem = &gs_open_close_sem[port->port_num];
+	down(sem);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR
+			"gs_close: (%d,%p,%p) port is already closed\n",
+			port->port_num, tty, file);
+		goto exit;
+	}
+
+	if (port->port_open_count > 1) {
+		--port->port_open_count;
+		goto exit;
+	}
+
+	/* free disconnected port on final close */
+	if (port->port_dev == NULL) {
+		kfree(port);
+		goto exit;
+	}
+
+	/* mark port as closed but in use, we can drop port lock */
+	/* and sleep if necessary */
+	port->port_in_use = 1;
+	port->port_open_count = 0;
+
+	/* wait for write buffer to drain, or */
+	/* at most GS_CLOSE_TIMEOUT seconds */
+	if (gs_buf_data_avail(port->port_write_buf) > 0) {
+		wait_cond_interruptible_timeout(port->port_write_wait,
+		port->port_dev == NULL
+		|| gs_buf_data_avail(port->port_write_buf) == 0,
+		&port->port_lock, flags, GS_CLOSE_TIMEOUT * HZ);
+	}
+
+	/* free disconnected port on final close */
+	/* (might have happened during the above sleep) */
+	if (port->port_dev == NULL) {
+		kfree(port);
+		goto exit;
+	}
+
+	gs_buf_clear(port->port_write_buf);
+
+	tty->driver_data = NULL;
+	port->port_tty = NULL;
+	port->port_in_use = 0;
+
+	gs_debug("gs_close: (%d,%p,%p) completed\n",
+		port->port_num, tty, file);
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	up(sem);
+}
+
+/*
+ * gs_write
+ */
+static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+	int ret;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_write: NULL port pointer\n");
+		return -EIO;
+	}
+
+	gs_debug("gs_write: (%d,%p) writing %d bytes\n", port->port_num, tty,
+		count);
+
+	if (count == 0)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is not connected\n",
+			port->port_num, tty);
+		ret = -EIO;
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is closed\n",
+			port->port_num, tty);
+		ret = -EBADF;
+		goto exit;
+	}
+
+	count = gs_buf_put(port->port_write_buf, buf, count);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_send(gs_device);
+
+	gs_debug("gs_write: (%d,%p) wrote %d bytes\n", port->port_num, tty,
+		count);
+
+	return count;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * gs_put_char
+ */
+static void gs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_put_char: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n", port->port_num, tty, ch, __builtin_return_address(0), __builtin_return_address(1), __builtin_return_address(2));
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is not connected\n",
+			port->port_num, tty);
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is closed\n",
+			port->port_num, tty);
+		goto exit;
+	}
+
+	gs_buf_put(port->port_write_buf, &ch, 1);
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_flush_chars
+ */
+static void gs_flush_chars(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_flush_chars: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR
+			"gs_flush_chars: (%d,%p) port is not connected\n",
+			port->port_num, tty);
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_flush_chars: (%d,%p) port is closed\n",
+			port->port_num, tty);
+		goto exit;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_send(gs_device);
+
+	return;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_write_room
+ */
+static int gs_write_room(struct tty_struct *tty)
+{
+
+	int room = 0;
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+
+	if (port == NULL)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev != NULL && port->port_open_count > 0
+	&& port->port_write_buf != NULL)
+		room = gs_buf_space_avail(port->port_write_buf);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write_room: (%d,%p) room=%d\n",
+		port->port_num, tty, room);
+
+	return room;
+}
+
+/*
+ * gs_chars_in_buffer
+ */
+static int gs_chars_in_buffer(struct tty_struct *tty)
+{
+	int chars = 0;
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev != NULL && port->port_open_count > 0
+	&& port->port_write_buf != NULL)
+		chars = gs_buf_data_avail(port->port_write_buf);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+		port->port_num, tty, chars);
+
+	return chars;
+}
+
+/*
+ * gs_throttle
+ */
+static void gs_throttle(struct tty_struct *tty)
+{
+}
+
+/*
+ * gs_unthrottle
+ */
+static void gs_unthrottle(struct tty_struct *tty)
+{
+}
+
+/*
+ * gs_break
+ */
+static void gs_break(struct tty_struct *tty, int break_state)
+{
+}
+
+/*
+ * gs_ioctl
+ */
+static int gs_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_ioctl: NULL port pointer\n");
+		return -EIO;
+	}
+
+	gs_debug("gs_ioctl: (%d,%p,%p) cmd=0x%4.4x, arg=%lu\n",
+		port->port_num, tty, file, cmd, arg);
+
+	/* handle ioctls */
+
+	/* could not handle ioctl */
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * gs_set_termios
+ */
+static void gs_set_termios(struct tty_struct *tty, struct termios *old)
+{
+}
+
+/*
+* gs_send
+*
+* This function finds available write requests, calls
+* gs_send_packet to fill these packets with data, and
+* continues until either there are no more write requests
+* available or no more data to send.  This function is
+* run whenever data arrives or write requests are available.
+*/
+static int gs_send(struct gs_dev *dev)
+{
+	int ret, len;
+	unsigned long flags;
+	struct usb_ep *ep;
+	struct usb_request *req;
+	struct gs_req_entry *req_entry;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_send: NULL device pointer\n");
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&dev->dev_lock, flags);
+
+	ep = dev->dev_in_ep;
+
+	while (!list_empty(&dev->dev_req_list)) {
+
+		req_entry = list_entry(dev->dev_req_list.next,
+			struct gs_req_entry, re_entry);
+
+		req = req_entry->re_req;
+
+		len = gs_send_packet(dev, req->buf, ep->maxpacket);
+
+		if (len > 0) {
+			gs_debug_level(3,
+				"gs_send: len=%d, 0x%2.2x 0x%2.2x 0x%2.2x ...\n",
+				len, *((unsigned char *)req->buf),
+				*((unsigned char *)req->buf+1),
+				*((unsigned char *)req->buf+2));
+			list_del(&req_entry->re_entry);
+			req->length = len;
+			if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
+				printk(KERN_ERR
+				"gs_send: cannot queue read request, ret=%d\n",
+					ret);
+				break;
+			}
+		} else {
+			break;
+		}
+
+	}
+
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+
+	return 0;
+}
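+
+/*
+ * Write-side data flow, in outline:
+ *
+ *	gs_write()/gs_put_char() copy data into port->port_write_buf;
+ *	gs_send() takes free requests from dev->dev_req_list, fills them
+ *	via gs_send_packet(), and queues them on the bulk IN endpoint;
+ *	gs_write_complete() returns finished requests to dev_req_list and
+ *	calls gs_send() again in case more data is waiting.
+ */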
+
+/*
+ * gs_send_packet
+ *
+ * If there is data to send, a packet is built in the given
+ * buffer and the size is returned.  If there is no data to
+ * send, 0 is returned.  If there is any error a negative
+ * error number is returned.
+ *
+ * Called during USB completion routine, on interrupt time.
+ *
+ * We assume that disconnect will not happen until all completion
+ * routines have completed, so we can assume that the dev_port
+ * array does not change during the lifetime of this function.
+ */
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size)
+{
+	unsigned int len;
+	struct gs_port *port;
+
+	/* TEMPORARY -- only port 0 is supported right now */
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR
+			"gs_send_packet: port=%d, NULL port pointer\n",
+			0);
+		return -EIO;
+	}
+
+	spin_lock(&port->port_lock);
+
+	len = gs_buf_data_avail(port->port_write_buf);
+	if (len < size)
+		size = len;
+
+	if (size == 0)
+		goto exit;
+
+	size = gs_buf_get(port->port_write_buf, packet, size);
+
+	if (port->port_tty)
+		wake_up_interruptible(&port->port_tty->write_wait);
+
+exit:
+	spin_unlock(&port->port_lock);
+	return size;
+}
+
+/*
+ * gs_recv_packet
+ *
+ * Called for each USB packet received.  Reads the packet
+ * header and stuffs the data in the appropriate tty buffer.
+ * Returns 0 if successful, or a negative error number.
+ *
+ * Called during USB completion routine, on interrupt time.
+ *
+ * We assume that disconnect will not happen until all completion
+ * routines have completed, so we can assume that the dev_port
+ * array does not change during the lifetime of this function.
+ */
+static int gs_recv_packet(struct gs_dev *dev, char *packet, unsigned int size)
+{
+	unsigned int len;
+	struct gs_port *port;
+	int ret;
+
+	/* TEMPORARY -- only port 0 is supported right now */
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_recv_packet: port=%d, NULL port pointer\n",
+			0);
+		return -EIO;
+	}
+
+	spin_lock(&port->port_lock);
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_recv_packet: port=%d, port is closed\n",
+			port->port_num);
+		ret = -EIO;
+		goto exit;
+	}
+
+	if (port->port_tty == NULL) {
+		printk(KERN_ERR "gs_recv_packet: port=%d, NULL tty pointer\n",
+			port->port_num);
+		ret = -EIO;
+		goto exit;
+	}
+
+	if (port->port_tty->magic != TTY_MAGIC) {
+		printk(KERN_ERR "gs_recv_packet: port=%d, bad tty magic\n",
+			port->port_num);
+		ret = -EIO;
+		goto exit;
+	}
+
+	len = (unsigned int)(TTY_FLIPBUF_SIZE - port->port_tty->flip.count);
+	if (len < size)
+		size = len;
+
+	if (size > 0) {
+		memcpy(port->port_tty->flip.char_buf_ptr, packet, size);
+		port->port_tty->flip.char_buf_ptr += size;
+		port->port_tty->flip.count += size;
+		tty_flip_buffer_push(port->port_tty);
+		wake_up_interruptible(&port->port_tty->read_wait);
+	}
+
+	ret = 0;
+
+exit:
+	spin_unlock(&port->port_lock);
+	return ret;
+}
+
+/*
+* gs_read_complete
+*/
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	int ret;
+	struct gs_dev *dev = ep->driver_data;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_read_complete: NULL device pointer\n");
+		return;
+	}
+
+	switch(req->status) {
+	case 0:
+ 		/* normal completion */
+		gs_recv_packet(dev, req->buf, req->actual);
+requeue:
+		req->length = ep->maxpacket;
+		if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
+			printk(KERN_ERR
+			"gs_read_complete: cannot queue read request, ret=%d\n",
+				ret);
+		}
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_read_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	default:
+		/* unexpected */
+		printk(KERN_ERR
+		"gs_read_complete: unexpected status error, status=%d\n",
+			req->status);
+		goto requeue;
+		break;
+	}
+}
+
+/*
+* gs_write_complete
+*/
+static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = ep->driver_data;
+	struct gs_req_entry *gs_req = req->context;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_write_complete: NULL device pointer\n");
+		return;
+	}
+
+	switch(req->status) {
+	case 0:
+		/* normal completion */
+requeue:
+		if (gs_req == NULL) {
+			printk(KERN_ERR
+				"gs_write_complete: NULL request pointer\n");
+			return;
+		}
+
+		spin_lock(&dev->dev_lock);
+		list_add(&gs_req->re_entry, &dev->dev_req_list);
+		spin_unlock(&dev->dev_lock);
+
+		gs_send(dev);
+
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_write_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	default:
+		printk(KERN_ERR
+		"gs_write_complete: unexpected status error, status=%d\n",
+			req->status);
+		goto requeue;
+		break;
+	}
+}
+
+/* Gadget Driver */
+
+/*
+ * gs_bind
+ *
+ * Called on module load.  Allocates and initializes the device
+ * structure and a control request.
+ */
+static int gs_bind(struct usb_gadget *gadget)
+{
+	int ret;
+	struct usb_ep *ep;
+	struct gs_dev *dev;
+
+	/* device specific */
+	if (gadget_is_net2280(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0001);
+	} else if (gadget_is_pxa(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0002);
+	} else if (gadget_is_sh(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0003);
+		/* sh doesn't support multiple interfaces or configs */
+		use_acm = 0;
+	} else if (gadget_is_sa1100(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0004);
+		/* sa1100 doesn't support necessary endpoints */
+		use_acm = 0;
+	} else if (gadget_is_goku(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0005);
+	} else if (gadget_is_mq11xx(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0006);
+	} else if (gadget_is_omap(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0007);
+	} else if (gadget_is_lh7a40x(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0008);
+	} else if (gadget_is_n9604(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0009);
+	} else if (gadget_is_pxa27x(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0011);
+	} else if (gadget_is_s3c2410(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0012);
+	} else if (gadget_is_at91(gadget)) {
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0013);
+	} else {
+		printk(KERN_WARNING "gs_bind: controller '%s' not recognized\n",
+			gadget->name);
+		/* unrecognized, but safe unless bulk is REALLY quirky */
+		gs_device_desc.bcdDevice =
+			__constant_cpu_to_le16(GS_VERSION_NUM|0x0099);
+	}
+
+	usb_ep_autoconfig_reset(gadget);
+
+	ep = usb_ep_autoconfig(gadget, &gs_fullspeed_in_desc);
+	if (!ep)
+		goto autoconf_fail;
+	EP_IN_NAME = ep->name;
+	ep->driver_data = ep;	/* claim the endpoint */
+
+	ep = usb_ep_autoconfig(gadget, &gs_fullspeed_out_desc);
+	if (!ep)
+		goto autoconf_fail;
+	EP_OUT_NAME = ep->name;
+	ep->driver_data = ep;	/* claim the endpoint */
+
+	if (use_acm) {
+		ep = usb_ep_autoconfig(gadget, &gs_fullspeed_notify_desc);
+		if (!ep) {
+			printk(KERN_ERR "gs_bind: cannot run ACM on %s\n", gadget->name);
+			goto autoconf_fail;
+		}
+		gs_device_desc.idProduct = __constant_cpu_to_le16(
+						GS_CDC_PRODUCT_ID);
+		EP_NOTIFY_NAME = ep->name;
+		ep->driver_data = ep;	/* claim the endpoint */
+	}
+
+	gs_device_desc.bDeviceClass = use_acm
+		? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC;
+	gs_device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	gs_qualifier_desc.bDeviceClass = use_acm
+		? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC;
+	/* assume ep0 uses the same packet size for both speeds */
+	gs_qualifier_desc.bMaxPacketSize0 = gs_device_desc.bMaxPacketSize0;
+	/* assume endpoints are dual-speed */
+	gs_highspeed_notify_desc.bEndpointAddress =
+		gs_fullspeed_notify_desc.bEndpointAddress;
+	gs_highspeed_in_desc.bEndpointAddress =
+		gs_fullspeed_in_desc.bEndpointAddress;
+	gs_highspeed_out_desc.bEndpointAddress =
+		gs_fullspeed_out_desc.bEndpointAddress;
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+
+	usb_gadget_set_selfpowered(gadget);
+
+	if (gadget->is_otg) {
+		gs_otg_descriptor.bmAttributes |= USB_OTG_HNP;
+		gs_bulk_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+		gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	gs_device = dev = kmalloc(sizeof(struct gs_dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
+		system_utsname.sysname, system_utsname.release,
+		gadget->name);
+
+	memset(dev, 0, sizeof(struct gs_dev));
+	dev->dev_gadget = gadget;
+	spin_lock_init(&dev->dev_lock);
+	INIT_LIST_HEAD(&dev->dev_req_list);
+	set_gadget_data(gadget, dev);
+
+	if ((ret=gs_alloc_ports(dev, GFP_KERNEL)) != 0) {
+		printk(KERN_ERR "gs_bind: cannot allocate ports\n");
+		gs_unbind(gadget);
+		return ret;
+	}
+
+	/* preallocate control response and buffer */
+	dev->dev_ctrl_req = gs_alloc_req(gadget->ep0, GS_MAX_DESC_LEN,
+		GFP_KERNEL);
+	if (dev->dev_ctrl_req == NULL) {
+		gs_unbind(gadget);
+		return -ENOMEM;
+	}
+	dev->dev_ctrl_req->complete = gs_setup_complete;
+
+	gadget->ep0->driver_data = dev;
+
+	printk(KERN_INFO "gs_bind: %s %s bound\n",
+		GS_LONG_NAME, GS_VERSION_STR);
+
+	return 0;
+
+autoconf_fail:
+	printk(KERN_ERR "gs_bind: cannot autoconfigure on %s\n", gadget->name);
+	return -ENODEV;
+}
+
+/*
+ * gs_unbind
+ *
+ * Called on module unload.  Frees the control request and device
+ * structure.
+ */
+static void gs_unbind(struct usb_gadget *gadget)
+{
+	struct gs_dev *dev = get_gadget_data(gadget);
+
+	gs_device = NULL;
+
+	/* read/write requests already freed, only control request remains */
+	if (dev != NULL) {
+		if (dev->dev_ctrl_req != NULL) {
+			gs_free_req(gadget->ep0, dev->dev_ctrl_req);
+			dev->dev_ctrl_req = NULL;
+		}
+		gs_free_ports(dev);
+		kfree(dev);
+		set_gadget_data(gadget, NULL);
+	}
+
+	printk(KERN_INFO "gs_unbind: %s %s unbound\n", GS_LONG_NAME,
+		GS_VERSION_STR);
+}
+
+/*
+ * gs_setup
+ *
+ * Implements all the control endpoint functionality that's not
+ * handled in hardware or the hardware driver.
+ *
+ * Returns the size of the data sent to the host, or a negative
+ * error number.
+ */
+static int gs_setup(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl)
+{
+	int ret = -EOPNOTSUPP;
+	struct gs_dev *dev = get_gadget_data(gadget);
+	struct usb_request *req = dev->dev_ctrl_req;
+	u16 wIndex = ctrl->wIndex;
+	u16 wValue = ctrl->wValue;
+	u16 wLength = ctrl->wLength;
+
+	switch (ctrl->bRequestType & USB_TYPE_MASK) {
+	case USB_TYPE_STANDARD:
+		ret = gs_setup_standard(gadget,ctrl);
+		break;
+
+	case USB_TYPE_CLASS:
+		ret = gs_setup_class(gadget,ctrl);
+		break;
+
+	default:
+		printk(KERN_ERR "gs_setup: unknown request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+		break;
+	}
+
+	/* respond with data transfer before status phase? */
+	if (ret >= 0) {
+		req->length = ret;
+		req->zero = ret < wLength
+				&& (ret % gadget->ep0->maxpacket) == 0;
+		ret = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0) {
+			printk(KERN_ERR "gs_setup: cannot queue response, ret=%d\n",
+				ret);
+			req->status = 0;
+			gs_setup_complete(gadget->ep0, req);
+		}
+	}
+
+	/* device either stalls (ret < 0) or reports success */
+	return ret;
+}
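+
+/*
+ * Note: for requests with an IN data stage, gs_setup_standard() and
+ * gs_setup_class() return the number of bytes they placed in
+ * dev_ctrl_req->buf; gs_setup() then queues that request on ep0 and the
+ * hardware driver runs the status stage.  A negative return value makes
+ * the hardware driver stall ep0 instead.
+ */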
+
+static int gs_setup_standard(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl)
+{
+	int ret = -EOPNOTSUPP;
+	struct gs_dev *dev = get_gadget_data(gadget);
+	struct usb_request *req = dev->dev_ctrl_req;
+	u16 wIndex = ctrl->wIndex;
+	u16 wValue = ctrl->wValue;
+	u16 wLength = ctrl->wLength;
+
+	switch (ctrl->bRequest) {
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			break;
+
+		switch (wValue >> 8) {
+		case USB_DT_DEVICE:
+			ret = min(wLength,
+				(u16)sizeof(struct usb_device_descriptor));
+			memcpy(req->buf, &gs_device_desc, ret);
+			break;
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!gadget->is_dualspeed)
+				break;
+			ret = min(wLength,
+				(u16)sizeof(struct usb_qualifier_descriptor));
+			memcpy(req->buf, &gs_qualifier_desc, ret);
+			break;
+
+		case USB_DT_OTHER_SPEED_CONFIG:
+			if (!gadget->is_dualspeed)
+				break;
+			/* fall through */
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+		case USB_DT_CONFIG:
+			ret = gs_build_config_buf(req->buf, gadget->speed,
+				wValue >> 8, wValue & 0xff,
+				gadget->is_otg);
+			if (ret >= 0)
+				ret = min(wLength, (u16)ret);
+			break;
+
+		case USB_DT_STRING:
+			/* wIndex == language code. */
+			ret = usb_gadget_get_string(&gs_string_table,
+				wValue & 0xff, req->buf);
+			if (ret >= 0)
+				ret = min(wLength, (u16)ret);
+			break;
+		}
+		break;
+
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != 0)
+			break;
+		spin_lock(&dev->dev_lock);
+		ret = gs_set_config(dev, wValue);
+		spin_unlock(&dev->dev_lock);
+		break;
+
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			break;
+		*(u8 *)req->buf = dev->dev_config;
+		ret = min(wLength, (u16)1);
+		break;
+
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != USB_RECIP_INTERFACE
+				|| !dev->dev_config
+				|| wIndex >= GS_MAX_NUM_INTERFACES)
+			break;
+		if (dev->dev_config == GS_BULK_CONFIG_ID
+				&& wIndex != GS_BULK_INTERFACE_ID)
+			break;
+		/* no alternate interface settings */
+		if (wValue != 0)
+			break;
+		spin_lock(&dev->dev_lock);
+		/* PXA hardware partially handles SET_INTERFACE;
+		 * we need to kluge around that interference.  */
+		if (gadget_is_pxa(gadget)) {
+			ret = gs_set_config(dev, use_acm ?
+				GS_ACM_CONFIG_ID : GS_BULK_CONFIG_ID);
+			goto set_interface_done;
+		}
+		if (dev->dev_config != GS_BULK_CONFIG_ID
+				&& wIndex == GS_CONTROL_INTERFACE_ID) {
+			if (dev->dev_notify_ep) {
+				usb_ep_disable(dev->dev_notify_ep);
+				usb_ep_enable(dev->dev_notify_ep, dev->dev_notify_ep_desc);
+			}
+		} else {
+			usb_ep_disable(dev->dev_in_ep);
+			usb_ep_disable(dev->dev_out_ep);
+			usb_ep_enable(dev->dev_in_ep, dev->dev_in_ep_desc);
+			usb_ep_enable(dev->dev_out_ep, dev->dev_out_ep_desc);
+		}
+		ret = 0;
+set_interface_done:
+		spin_unlock(&dev->dev_lock);
+		break;
+
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)
+		|| dev->dev_config == GS_NO_CONFIG_ID)
+			break;
+		if (wIndex >= GS_MAX_NUM_INTERFACES
+				|| (dev->dev_config == GS_BULK_CONFIG_ID
+				&& wIndex != GS_BULK_INTERFACE_ID)) {
+			ret = -EDOM;
+			break;
+		}
+		/* no alternate interface settings */
+		*(u8 *)req->buf = 0;
+		ret = min(wLength, (u16)1);
+		break;
+
+	default:
+		printk(KERN_ERR "gs_setup: unknown standard request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+		break;
+	}
+
+	return ret;
+}
+
+static int gs_setup_class(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl)
+{
+	int ret = -EOPNOTSUPP;
+	struct gs_dev *dev = get_gadget_data(gadget);
+	struct gs_port *port = dev->dev_port[0];	/* ACM only has one port */
+	struct usb_request *req = dev->dev_ctrl_req;
+	u16 wIndex = ctrl->wIndex;
+	u16 wValue = ctrl->wValue;
+	u16 wLength = ctrl->wLength;
+
+	switch (ctrl->bRequest) {
+	case USB_CDC_REQ_SET_LINE_CODING:
+		ret = min(wLength,
+			(u16)sizeof(struct usb_cdc_line_coding));
+		if (port) {
+			spin_lock(&port->port_lock);
+			memcpy(&port->port_line_coding, req->buf, ret);
+			spin_unlock(&port->port_lock);
+		}
+		break;
+
+	case USB_CDC_REQ_GET_LINE_CODING:
+		port = dev->dev_port[0];	/* ACM only has one port */
+		ret = min(wLength,
+			(u16)sizeof(struct usb_cdc_line_coding));
+		if (port) {
+			spin_lock(&port->port_lock);
+			memcpy(req->buf, &port->port_line_coding, ret);
+			spin_unlock(&port->port_lock);
+		}
+		break;
+
+	case USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		ret = 0;
+		break;
+
+	default:
+		printk(KERN_ERR "gs_setup: unknown class request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * gs_setup_complete
+ */
+static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length) {
+		printk(KERN_ERR "gs_setup_complete: status error, status=%d, actual=%d, length=%d\n",
+			req->status, req->actual, req->length);
+	}
+}
+
+/*
+ * gs_disconnect
+ *
+ * Called when the device is disconnected.  Frees the closed
+ * ports and disconnects open ports.  Open ports will be freed
+ * on close.  Then reallocates the ports for the next connection.
+ */
+static void gs_disconnect(struct usb_gadget *gadget)
+{
+	unsigned long flags;
+	struct gs_dev *dev = get_gadget_data(gadget);
+
+	spin_lock_irqsave(&dev->dev_lock, flags);
+
+	gs_reset_config(dev);
+
+	/* free closed ports and disconnect open ports */
+	/* (open ports will be freed when closed) */
+	gs_free_ports(dev);
+
+	/* re-allocate ports for the next connection */
+	if (gs_alloc_ports(dev, GFP_ATOMIC) != 0)
+		printk(KERN_ERR "gs_disconnect: cannot re-allocate ports\n");
+
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+
+	printk(KERN_INFO "gs_disconnect: %s disconnected\n", GS_LONG_NAME);
+}
+
+/*
+ * gs_set_config
+ *
+ * Configures the device by enabling device specific
+ * optimizations, setting up the endpoints, allocating
+ * read and write requests and queuing read requests.
+ *
+ * The device lock must be held when calling this function.
+ */
+static int gs_set_config(struct gs_dev *dev, unsigned config)
+{
+	int i;
+	int ret = 0;
+	struct usb_gadget *gadget = dev->dev_gadget;
+	struct usb_ep *ep;
+	struct usb_endpoint_descriptor *ep_desc;
+	struct usb_request *req;
+	struct gs_req_entry *req_entry;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_set_config: NULL device pointer\n");
+		return 0;
+	}
+
+	if (config == dev->dev_config)
+		return 0;
+
+	gs_reset_config(dev);
+
+	switch (config) {
+	case GS_NO_CONFIG_ID:
+		return 0;
+	case GS_BULK_CONFIG_ID:
+		if (use_acm)
+			return -EINVAL;
+		/* device specific optimizations */
+		if (gadget_is_net2280(gadget))
+			net2280_set_fifo_mode(gadget, 1);
+		break;
+	case GS_ACM_CONFIG_ID:
+		if (!use_acm)
+			return -EINVAL;
+		/* device specific optimizations */
+		if (gadget_is_net2280(gadget))
+			net2280_set_fifo_mode(gadget, 1);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dev->dev_config = config;
+
+	gadget_for_each_ep(ep, gadget) {
+
+		if (EP_NOTIFY_NAME
+		&& strcmp(ep->name, EP_NOTIFY_NAME) == 0) {
+			ep_desc = GS_SPEED_SELECT(
+				gadget->speed == USB_SPEED_HIGH,
+				&gs_highspeed_notify_desc,
+				&gs_fullspeed_notify_desc);
+			ret = usb_ep_enable(ep,ep_desc);
+			if (ret == 0) {
+				ep->driver_data = dev;
+				dev->dev_notify_ep = ep;
+				dev->dev_notify_ep_desc = ep_desc;
+			} else {
+				printk(KERN_ERR "gs_set_config: cannot enable notify endpoint %s, ret=%d\n",
+					ep->name, ret);
+				goto exit_reset_config;
+			}
+		}
+
+		else if (strcmp(ep->name, EP_IN_NAME) == 0) {
+			ep_desc = GS_SPEED_SELECT(
+				gadget->speed == USB_SPEED_HIGH,
+ 				&gs_highspeed_in_desc,
+				&gs_fullspeed_in_desc);
+			ret = usb_ep_enable(ep,ep_desc);
+			if (ret == 0) {
+				ep->driver_data = dev;
+				dev->dev_in_ep = ep;
+				dev->dev_in_ep_desc = ep_desc;
+			} else {
+				printk(KERN_ERR "gs_set_config: cannot enable in endpoint %s, ret=%d\n",
+					ep->name, ret);
+				goto exit_reset_config;
+			}
+		}
+
+		else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
+			ep_desc = GS_SPEED_SELECT(
+				gadget->speed == USB_SPEED_HIGH,
+				&gs_highspeed_out_desc,
+				&gs_fullspeed_out_desc);
+			ret = usb_ep_enable(ep,ep_desc);
+			if (ret == 0) {
+				ep->driver_data = dev;
+				dev->dev_out_ep = ep;
+				dev->dev_out_ep_desc = ep_desc;
+			} else {
+				printk(KERN_ERR "gs_set_config: cannot enable out endpoint %s, ret=%d\n",
+					ep->name, ret);
+				goto exit_reset_config;
+			}
+		}
+
+	}
+
+	if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL
+	|| (config != GS_BULK_CONFIG_ID && dev->dev_notify_ep == NULL)) {
+		printk(KERN_ERR "gs_set_config: cannot find endpoints\n");
+		ret = -ENODEV;
+		goto exit_reset_config;
+	}
+
+	/* allocate and queue read requests */
+	ep = dev->dev_out_ep;
+	for (i=0; i<read_q_size && ret == 0; i++) {
+		if ((req=gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC))) {
+			req->complete = gs_read_complete;
+			if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
+				printk(KERN_ERR "gs_set_config: cannot queue read request, ret=%d\n",
+					ret);
+			}
+		} else {
+			printk(KERN_ERR "gs_set_config: cannot allocate read requests\n");
+			ret = -ENOMEM;
+			goto exit_reset_config;
+		}
+	}
+
+	/* allocate write requests, and put on free list */
+	ep = dev->dev_in_ep;
+	for (i=0; i<write_q_size; i++) {
+		if ((req_entry=gs_alloc_req_entry(ep, ep->maxpacket, GFP_ATOMIC))) {
+			req_entry->re_req->complete = gs_write_complete;
+			list_add(&req_entry->re_entry, &dev->dev_req_list);
+		} else {
+			printk(KERN_ERR "gs_set_config: cannot allocate write requests\n");
+			ret = -ENOMEM;
+			goto exit_reset_config;
+		}
+	}
+
+	printk(KERN_INFO "gs_set_config: %s configured, %s speed %s config\n",
+		GS_LONG_NAME,
+		gadget->speed == USB_SPEED_HIGH ? "high" : "full",
+		config == GS_BULK_CONFIG_ID ? "BULK" : "CDC-ACM");
+
+	return 0;
+
+exit_reset_config:
+	gs_reset_config(dev);
+	return ret;
+}
+
+/*
+ * gs_reset_config
+ *
+ * Mark the device as not configured, disable all endpoints,
+ * which forces completion of pending I/O and frees queued
+ * requests, and free the remaining write requests on the
+ * free list.
+ *
+ * The device lock must be held when calling this function.
+ */
+static void gs_reset_config(struct gs_dev *dev)
+{
+	struct gs_req_entry *req_entry;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_reset_config: NULL device pointer\n");
+		return;
+	}
+
+	if (dev->dev_config == GS_NO_CONFIG_ID)
+		return;
+
+	dev->dev_config = GS_NO_CONFIG_ID;
+
+	/* free write requests on the free list */
+	while(!list_empty(&dev->dev_req_list)) {
+		req_entry = list_entry(dev->dev_req_list.next,
+			struct gs_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		gs_free_req_entry(dev->dev_in_ep, req_entry);
+	}
+
+	/* disable endpoints, forcing completion of pending i/o; */
+	/* completion handlers free their requests in this case */
+	if (dev->dev_notify_ep) {
+		usb_ep_disable(dev->dev_notify_ep);
+		dev->dev_notify_ep = NULL;
+	}
+	if (dev->dev_in_ep) {
+		usb_ep_disable(dev->dev_in_ep);
+		dev->dev_in_ep = NULL;
+	}
+	if (dev->dev_out_ep) {
+		usb_ep_disable(dev->dev_out_ep);
+		dev->dev_out_ep = NULL;
+	}
+}
+
+/*
+ * gs_build_config_buf
+ *
+ * Builds the config descriptors in the given buffer and returns the
+ * length, or a negative error number.
+ */
+static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
+	u8 type, unsigned int index, int is_otg)
+{
+	int len;
+	int high_speed;
+	const struct usb_config_descriptor *config_desc;
+	const struct usb_descriptor_header **function;
+
+	if (index >= gs_device_desc.bNumConfigurations)
+		return -EINVAL;
+
+	/* other speed switches high and full speed */
+	high_speed = (speed == USB_SPEED_HIGH);
+	if (type == USB_DT_OTHER_SPEED_CONFIG)
+		high_speed = !high_speed;
+
+	if (use_acm) {
+		config_desc = &gs_acm_config_desc;
+		function = GS_SPEED_SELECT(high_speed,
+			gs_acm_highspeed_function,
+			gs_acm_fullspeed_function);
+	} else {
+		config_desc = &gs_bulk_config_desc;
+		function = GS_SPEED_SELECT(high_speed,
+			gs_bulk_highspeed_function,
+			gs_bulk_fullspeed_function);
+	}
+
+	/* for now, don't advertise srp-only devices; if the controller
+	 * isn't OTG-capable, skip the OTG descriptor, which is the first
+	 * entry in each function array
+	 */
+	if (!is_otg)
+		function++;
+
+	len = usb_gadget_config_buf(config_desc, buf, GS_MAX_DESC_LEN, function);
+	if (len < 0)
+		return len;
+
+	((struct usb_config_descriptor *)buf)->bDescriptorType = type;
+
+	return len;
+}
+
+/*
+ * gs_alloc_req
+ *
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len, int kmalloc_flags)
+{
+	struct usb_request *req;
+
+	if (ep == NULL)
+		return NULL;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			return NULL;
+		}
+	}
+
+	return req;
+}
+
+/*
+ * gs_free_req
+ *
+ * Free a usb_request and its buffer.
+ */
+static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	if (ep != NULL && req != NULL) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/*
+ * gs_alloc_req_entry
+ *
+ * Allocates a request and its buffer, using the given
+ * endpoint, buffer len, and kmalloc flags.
+ */
+static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len, int kmalloc_flags)
+{
+	struct gs_req_entry	*req;
+
+	req = kmalloc(sizeof(struct gs_req_entry), kmalloc_flags);
+	if (req == NULL)
+		return NULL;
+
+	req->re_req = gs_alloc_req(ep, len, kmalloc_flags);
+	if (req->re_req == NULL) {
+		kfree(req);
+		return NULL;
+	}
+
+	req->re_req->context = req;
+
+	return req;
+}
+
+/*
+ * gs_free_req_entry
+ *
+ * Frees a request and its buffer.
+ */
+static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
+{
+	if (ep != NULL && req != NULL) {
+		if (req->re_req != NULL)
+			gs_free_req(ep, req->re_req);
+		kfree(req);
+	}
+}
+
+/*
+ * gs_alloc_ports
+ *
+ * Allocate all ports and set the gs_dev struct to point to them.
+ * Return 0 if successful, or a negative error number.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static int gs_alloc_ports(struct gs_dev *dev, int kmalloc_flags)
+{
+	int i;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return -EIO;
+
+	for (i=0; i<GS_NUM_PORTS; i++) {
+		if ((port=(struct gs_port *)kmalloc(sizeof(struct gs_port), kmalloc_flags)) == NULL)
+			return -ENOMEM;
+
+		memset(port, 0, sizeof(struct gs_port));
+		port->port_dev = dev;
+		port->port_num = i;
+		port->port_line_coding.dwDTERate = cpu_to_le32(GS_DEFAULT_DTE_RATE);
+		port->port_line_coding.bCharFormat = GS_DEFAULT_CHAR_FORMAT;
+		port->port_line_coding.bParityType = GS_DEFAULT_PARITY;
+		port->port_line_coding.bDataBits = GS_DEFAULT_DATA_BITS;
+		spin_lock_init(&port->port_lock);
+		init_waitqueue_head(&port->port_write_wait);
+
+		dev->dev_port[i] = port;
+	}
+
+	return 0;
+}
+
+/*
+ * gs_free_ports
+ *
+ * Free all closed ports.  Open ports are disconnected by
+ * freeing their write buffers, setting their device pointers
+ * and the pointers to them in the device to NULL.  These
+ * ports will be freed when closed.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static void gs_free_ports(struct gs_dev *dev)
+{
+	int i;
+	unsigned long flags;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return;
+
+	for (i=0; i<GS_NUM_PORTS; i++) {
+		if ((port=dev->dev_port[i]) != NULL) {
+			dev->dev_port[i] = NULL;
+
+			spin_lock_irqsave(&port->port_lock, flags);
+
+			if (port->port_write_buf != NULL) {
+				gs_buf_free(port->port_write_buf);
+				port->port_write_buf = NULL;
+			}
+
+			if (port->port_open_count > 0 || port->port_in_use) {
+				port->port_dev = NULL;
+				wake_up_interruptible(&port->port_write_wait);
+				if (port->port_tty) {
+					wake_up_interruptible(&port->port_tty->read_wait);
+					wake_up_interruptible(&port->port_tty->write_wait);
+				}
+				spin_unlock_irqrestore(&port->port_lock, flags);
+			} else {
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				kfree(port);
+			}
+
+		}
+	}
+}
+
+/* Circular Buffer */
+
+/*
+ * gs_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+static struct gs_buf *gs_buf_alloc(unsigned int size, int kmalloc_flags)
+{
+	struct gs_buf *gb;
+
+	if (size == 0)
+		return NULL;
+
+	gb = kmalloc(sizeof(struct gs_buf), kmalloc_flags);
+	if (gb == NULL)
+		return NULL;
+
+	gb->buf_buf = kmalloc(size, kmalloc_flags);
+	if (gb->buf_buf == NULL) {
+		kfree(gb);
+		return NULL;
+	}
+
+	gb->buf_size = size;
+	gb->buf_get = gb->buf_put = gb->buf_buf;
+
+	return gb;
+}
+
+/*
+ * gs_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+static void gs_buf_free(struct gs_buf *gb)
+{
+	if (gb != NULL) {
+		if (gb->buf_buf != NULL)
+			kfree(gb->buf_buf);
+		kfree(gb);
+	}
+}
+
+/*
+ * gs_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+static void gs_buf_clear(struct gs_buf *gb)
+{
+	if (gb != NULL)
+		/* equivalent to a get of all data available */
+		gb->buf_get = gb->buf_put;
+}
+
+/*
+ * gs_buf_data_avail
+ *
+ * Return the number of bytes of data available in the circular
+ * buffer.
+ */
+static unsigned int gs_buf_data_avail(struct gs_buf *gb)
+{
+	if (gb != NULL)
+		return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
+	else
+		return 0;
+}
+
+/*
+ * gs_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+static unsigned int gs_buf_space_avail(struct gs_buf *gb)
+{
+	if (gb != NULL)
+		return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
+	else
+		return 0;
+}
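+
+/*
+ * Worked example: with buf_size = 8, buf_put at offset 5, and buf_get
+ * at offset 2,
+ *
+ *	gs_buf_data_avail  = (8 + 5 - 2) % 8 = 3
+ *	gs_buf_space_avail = (8 + 2 - 5 - 1) % 8 = 4
+ *
+ * One byte is always left unused, so a full buffer (space_avail == 0)
+ * can be distinguished from an empty one (data_avail == 0).
+ */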
+
+/*
+ * gs_buf_put
+ *
+ * Copy data from a user buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	len  = gs_buf_space_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf+len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else /* count == len */
+			gb->buf_put = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+ * gs_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned int gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	len = gs_buf_data_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_get;
+	if (count > len) {
+		memcpy(buf, gb->buf_get, len);
+		memcpy(buf+len, gb->buf_buf, count - len);
+		gb->buf_get = gb->buf_buf + count - len;
+	} else {
+		memcpy(buf, gb->buf_get, count);
+		if (count < len)
+			gb->buf_get += count;
+		else /* count == len */
+			gb->buf_get = gb->buf_buf;
+	}
+
+	return count;
+}
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
new file mode 100644
index 0000000..b173576
--- /dev/null
+++ b/drivers/usb/gadget/usbstring.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/init.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include <asm/unaligned.h>
+
+
+static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
+{
+	int	count = 0;
+	u8	c;
+	u16	uchar;
+
+	/* this insists on correct encodings, though not minimal ones.
+	 * BUT it currently rejects legit 4-byte UTF-8 code points,
+	 * which need surrogate pairs.  (Unicode 3.1 can use them.)
+	 */
+	while (len != 0 && (c = (u8) *s++) != 0) {
+		if (unlikely(c & 0x80)) {
+			// 2-byte sequence:
+			// 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
+			if ((c & 0xe0) == 0xc0) {
+				uchar = (c & 0x1f) << 6;
+
+				c = (u8) *s++;
+				if ((c & 0xc0) != 0xc0)
+					goto fail;
+				c &= 0x3f;
+				uchar |= c;
+
+			// 3-byte sequence (most CJKV characters):
+			// zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
+			} else if ((c & 0xf0) == 0xe0) {
+				uchar = (c & 0x0f) << 12;
+
+				c = (u8) *s++;
+				if ((c & 0xc0) != 0xc0)
+					goto fail;
+				c &= 0x3f;
+				uchar |= c << 6;
+
+				c = (u8) *s++;
+				if ((c & 0xc0) != 0xc0)
+					goto fail;
+				c &= 0x3f;
+				uchar |= c;
+
+				/* no bogus surrogates */
+				if (0xd800 <= uchar && uchar <= 0xdfff)
+					goto fail;
+
+			// 4-byte sequence (surrogate pairs, currently rare):
+			// 11101110wwwwzzzzyy + 110111yyyyxxxxxx
+			//     = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
+			// (uuuuu = wwww + 1)
+			// FIXME accept the surrogate code points (only)
+
+			} else
+				goto fail;
+		} else
+			uchar = c;
+		put_unaligned (cpu_to_le16 (uchar), cp++);
+		count++;
+		len--;
+	}
+	return count;
+fail:
+	return -1;
+}
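+
+/*
+ * Example: the two-byte UTF-8 sequence 0xc3 0xa9 (U+00E9, 'e' with
+ * acute accent) decodes as
+ *
+ *	uchar = ((0xc3 & 0x1f) << 6) | (0xa9 & 0x3f) = 0x00e9
+ *
+ * and is stored little-endian in the string descriptor as the bytes
+ * 0xe9 0x00.
+ */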
+
+
+/**
+ * usb_gadget_get_string - fill out a string descriptor 
+ * @table: of c strings encoded using UTF-8
+ * @id: string id, from low byte of wValue in get string descriptor
+ * @buf: at least 256 bytes
+ *
+ * Finds the UTF-8 string matching the ID, and converts it into a
+ * string descriptor in utf16-le.
+ * Returns length of descriptor (always even) or negative errno
+ *
+ * If your driver needs strings in multiple languages, you'll probably
+ * "switch (wIndex) { ... }"  in your ep0 string descriptor logic,
+ * using this routine after choosing which set of UTF-8 strings to use.
+ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with
+ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
+ * characters (which are also widely used in C strings).
+ */
+int
+usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
+{
+	struct usb_string	*s;
+	int			len;
+
+	/* descriptor 0 has the language id */
+	if (id == 0) {
+		buf [0] = 4;
+		buf [1] = USB_DT_STRING;
+		buf [2] = (u8) table->language;
+		buf [3] = (u8) (table->language >> 8);
+		return 4;
+	}
+	for (s = table->strings; s && s->s; s++)
+		if (s->id == id)
+			break;
+
+	/* unrecognized: stall. */
+	if (!s || !s->s)
+		return -EINVAL;
+
+	/* string descriptors have length, tag, then UTF16-LE text */
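+	/* (bLength is a single byte, so after the 2-byte header a string
+	 * descriptor can carry at most 126 UTF-16LE code units)
+	 */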
+	len = min ((size_t) 126, strlen (s->s));
+	memset (buf + 2, 0, 2 * len);	/* zero all the bytes */
+	len = utf8_to_utf16le(s->s, (__le16 *)&buf[2], len);
+	if (len < 0)
+		return -EINVAL;
+	buf [0] = (len + 1) * 2;
+	buf [1] = USB_DT_STRING;
+	return buf [0];
+}
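+
+/* Illustrative sketch only (the table names below are hypothetical):  a
+ * driver offering several languages could keep one usb_gadget_strings
+ * table per LANGID and choose by wIndex before calling this routine:
+ *
+ *	switch (wIndex) {
+ *	case 0x040c:	table = &strings_fr; break;
+ *	case 0x0409:
+ *	default:	table = &strings_en_us; break;
+ *	}
+ *	value = usb_gadget_get_string (table, wValue & 0xff, req->buf);
+ */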
+
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
new file mode 100644
index 0000000..6e49432
--- /dev/null
+++ b/drivers/usb/gadget/zero.c
@@ -0,0 +1,1357 @@
+/*
+ * zero.c -- Gadget Zero, for USB development
+ *
+ * Copyright (C) 2003-2004 David Brownell
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * Gadget Zero only needs two bulk endpoints, and is an example of how you
+ * can write a hardware-agnostic gadget driver running inside a USB device.
+ *
+ * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't
+ * affect most of the driver.
+ *
+ * Use it with the Linux host/master side "usbtest" driver to get a basic
+ * functional test of your device-side usb stack, or with "usb-skeleton".
+ *
+ * It supports two similar configurations.  One sinks whatever the usb host
+ * writes, and in return sources zeroes.  The other loops whatever the host
+ * writes back, so the host can read it.  Module options include:
+ *
+ *   buflen=N		default N=4096, buffer size used
+ *   qlen=N		default N=32, how many buffers in the loopback queue
+ *   loopdefault	default false, list loopback config first
+ *
+ * Many drivers will only have one configuration, and are simpler still
+ * if they don't support high speed operation; this driver, by contrast,
+ * supports two configurations and high speed.
+ */
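+
+/* For example (assuming the usual "g_zero" module name from the gadget
+ * Makefile), the loopback configuration could be exercised with:
+ *
+ *	modprobe g_zero loopdefault=1 qlen=64 pattern=1
+ *
+ * "pattern" and "autoresume" are further module parameters declared below.
+ */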
+
+#define DEBUG 1
+// #define VERBOSE
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb_ch9.h>
+#include <linux/usb_gadget.h>
+
+#include "gadget_chips.h"
+
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_VERSION		"St Patrick's Day 2004"
+
+static const char shortname [] = "zero";
+static const char longname [] = "Gadget Zero";
+
+static const char source_sink [] = "source and sink data";
+static const char loopback [] = "loop input to output";
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * driver assumes self-powered hardware, and
+ * has no way for users to trigger remote wakeup.
+ *
+ * this version autoconfigures as much as possible,
+ * which is reasonable for most "bulk-only" drivers.
+ */
+static const char *EP_IN_NAME;		/* source */
+static const char *EP_OUT_NAME;		/* sink */
+
+/*-------------------------------------------------------------------------*/
+
+/* big enough to hold our biggest descriptor */
+#define USB_BUFSIZ	256
+
+struct zero_dev {
+	spinlock_t		lock;
+	struct usb_gadget	*gadget;
+	struct usb_request	*req;		/* for control responses */
+
+	/* when configured, we have one of two configs:
+	 * - source data (in to host) and sink it (out from host)
+	 * - or loop it back (out from host back in to host)
+	 */
+	u8			config;
+	struct usb_ep		*in_ep, *out_ep;
+
+	/* autoresume timer */
+	struct timer_list	resume;
+};
+
+#define xprintk(d,level,fmt,args...) \
+	dev_printk(level , &(d)->gadget->dev , fmt , ## args)
+
+#ifdef DEBUG
+#define DBG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDBG	DBG
+#else
+#define VDBG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARN(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+static unsigned buflen = 4096;
+static unsigned qlen = 32;
+static unsigned pattern = 0;
+
+module_param (buflen, uint, S_IRUGO|S_IWUSR);
+module_param (qlen, uint, S_IRUGO|S_IWUSR);
+module_param (pattern, uint, S_IRUGO|S_IWUSR);
+
+/*
+ * if it's nonzero, autoresume says how many seconds to wait
+ * before trying to wake up the host after suspend.
+ */
+static unsigned autoresume = 0;
+module_param (autoresume, uint, 0);
+
+/*
+ * Normally the "loopback" configuration is second (index 1) so
+ * it's not the default.  Here's where to change that order, to
+ * work better with hosts where config changes are problematic.
+ * Or controllers (like superh) that only support one config.
+ */
+static int loopdefault = 0;
+
+module_param (loopdefault, bool, S_IRUGO|S_IWUSR);
+
+/*-------------------------------------------------------------------------*/
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#ifndef	CONFIG_USB_ZERO_HNPTEST
+#define DRIVER_VENDOR_NUM	0x0525		/* NetChip */
+#define DRIVER_PRODUCT_NUM	0xa4a0		/* Linux-USB "Gadget Zero" */
+#else
+#define DRIVER_VENDOR_NUM	0x1a0a		/* OTG test device IDs */
+#define DRIVER_PRODUCT_NUM	0xbadd
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * DESCRIPTORS ... most are static, but strings and (full)
+ * configuration descriptors are built on demand.
+ */
+
+#define STRING_MANUFACTURER		25
+#define STRING_PRODUCT			42
+#define STRING_SERIAL			101
+#define STRING_SOURCE_SINK		250
+#define STRING_LOOPBACK			251
+
+/*
+ * This device advertises two configurations; these numbers work
+ * on a pxa250 as well as more flexible hardware.
+ */
+#define	CONFIG_SOURCE_SINK	3
+#define	CONFIG_LOOPBACK		2
+
+static struct usb_device_descriptor
+device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		__constant_cpu_to_le16 (0x0200),
+	.bDeviceClass =		USB_CLASS_VENDOR_SPEC,
+
+	.idVendor =		__constant_cpu_to_le16 (DRIVER_VENDOR_NUM),
+	.idProduct =		__constant_cpu_to_le16 (DRIVER_PRODUCT_NUM),
+	.iManufacturer =	STRING_MANUFACTURER,
+	.iProduct =		STRING_PRODUCT,
+	.iSerialNumber =	STRING_SERIAL,
+	.bNumConfigurations =	2,
+};
+
+static struct usb_config_descriptor
+source_sink_config = {
+	.bLength =		sizeof source_sink_config,
+	.bDescriptorType =	USB_DT_CONFIG,
+
+	/* compute wTotalLength on the fly */
+	.bNumInterfaces =	1,
+	.bConfigurationValue =	CONFIG_SOURCE_SINK,
+	.iConfiguration =	STRING_SOURCE_SINK,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		1,	/* self-powered */
+};
+
+static struct usb_config_descriptor
+loopback_config = {
+	.bLength =		sizeof loopback_config,
+	.bDescriptorType =	USB_DT_CONFIG,
+
+	/* compute wTotalLength on the fly */
+	.bNumInterfaces =	1,
+	.bConfigurationValue =	CONFIG_LOOPBACK,
+	.iConfiguration =	STRING_LOOPBACK,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		1,	/* self-powered */
+};
+
+static struct usb_otg_descriptor
+otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	.bmAttributes =		USB_OTG_SRP,
+};
+
+/* one interface in each configuration */
+
+static const struct usb_interface_descriptor
+source_sink_intf = {
+	.bLength =		sizeof source_sink_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.iInterface =		STRING_SOURCE_SINK,
+};
+
+static const struct usb_interface_descriptor
+loopback_intf = {
+	.bLength =		sizeof loopback_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.iInterface =		STRING_LOOPBACK,
+};
+
+/* two full speed bulk endpoints; their use is config-dependent */
+
+static struct usb_endpoint_descriptor
+fs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor
+fs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static const struct usb_descriptor_header *fs_source_sink_function [] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	(struct usb_descriptor_header *) &source_sink_intf,
+	(struct usb_descriptor_header *) &fs_sink_desc,
+	(struct usb_descriptor_header *) &fs_source_desc,
+	NULL,
+};
+
+static const struct usb_descriptor_header *fs_loopback_function [] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	(struct usb_descriptor_header *) &loopback_intf,
+	(struct usb_descriptor_header *) &fs_sink_desc,
+	(struct usb_descriptor_header *) &fs_source_desc,
+	NULL,
+};
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+
+/*
+ * usb 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ *
+ * that means alternate endpoint descriptors (bigger packets)
+ * and a "device qualifier" ... plus more construction options
+ * for the config descriptor.
+ */
+
+static struct usb_endpoint_descriptor
+hs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16 (512),
+};
+
+static struct usb_endpoint_descriptor
+hs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16 (512),
+};
+
+static struct usb_qualifier_descriptor
+dev_qualifier = {
+	.bLength =		sizeof dev_qualifier,
+	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
+
+	.bcdUSB =		__constant_cpu_to_le16 (0x0200),
+	.bDeviceClass =		USB_CLASS_VENDOR_SPEC,
+
+	.bNumConfigurations =	2,
+};
+
+static const struct usb_descriptor_header *hs_source_sink_function [] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	(struct usb_descriptor_header *) &source_sink_intf,
+	(struct usb_descriptor_header *) &hs_source_desc,
+	(struct usb_descriptor_header *) &hs_sink_desc,
+	NULL,
+};
+
+static const struct usb_descriptor_header *hs_loopback_function [] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	(struct usb_descriptor_header *) &loopback_intf,
+	(struct usb_descriptor_header *) &hs_source_desc,
+	(struct usb_descriptor_header *) &hs_sink_desc,
+	NULL,
+};
+
+/* maxpacket and other transfer characteristics vary by speed. */
+#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
+
+#else
+
+/* if there's no high speed support, maxpacket doesn't change. */
+#define ep_desc(g,hs,fs) fs
+
+#endif	/* !CONFIG_USB_GADGET_DUALSPEED */
+
+static char				manufacturer [50];
+static char				serial [40];
+
+/* static strings, in UTF-8 */
+static struct usb_string		strings [] = {
+	{ STRING_MANUFACTURER, manufacturer, },
+	{ STRING_PRODUCT, longname, },
+	{ STRING_SERIAL, serial, },
+	{ STRING_LOOPBACK, loopback, },
+	{ STRING_SOURCE_SINK, source_sink, },
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings	stringtab = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings,
+};
+
+/*
+ * config descriptors are also handcrafted.  these must agree with code
+ * that sets configurations, and with code managing interfaces and their
+ * altsettings.  other complexity may come from:
+ *
+ *  - high speed support, including "other speed config" rules
+ *  - multiple configurations
+ *  - interfaces with alternate settings
+ *  - embedded class or vendor-specific descriptors
+ *
+ * this handles high speed, and has a second config that could as easily
+ * have been an alternate interface setting (on most hardware).
+ *
+ * NOTE:  to demonstrate (and test) more USB capabilities, this driver
+ * should include an altsetting to test interrupt transfers, including
+ * high bandwidth modes at high speed.  (Maybe work like Intel's test
+ * device?)
+ */
+static int
+config_buf (struct usb_gadget *gadget,
+		u8 *buf, u8 type, unsigned index)
+{
+	int				is_source_sink;
+	int				len;
+	const struct usb_descriptor_header **function;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	int				hs = (gadget->speed == USB_SPEED_HIGH);
+#endif
+
+	/* two configurations will always be index 0 and index 1 */
+	if (index > 1)
+		return -EINVAL;
+	is_source_sink = loopdefault ? (index == 1) : (index == 0);
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	if (type == USB_DT_OTHER_SPEED_CONFIG)
+		hs = !hs;
+	if (hs)
+		function = is_source_sink
+			? hs_source_sink_function
+			: hs_loopback_function;
+	else
+#endif
+		function = is_source_sink
+			? fs_source_sink_function
+			: fs_loopback_function;
+
+	/* for now, don't advertise srp-only devices */
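+	/* (the otg descriptor is the first entry in each function[] table,
+	 * so skipping it hides OTG details from plain peripheral hardware)
+	 */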
+	if (!gadget->is_otg)
+		function++;
+
+	len = usb_gadget_config_buf (is_source_sink
+					? &source_sink_config
+					: &loopback_config,
+			buf, USB_BUFSIZ, function);
+	if (len < 0)
+		return len;
+	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
+	return len;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+alloc_ep_req (struct usb_ep *ep, unsigned length)
+{
+	struct usb_request	*req;
+
+	req = usb_ep_alloc_request (ep, GFP_ATOMIC);
+	if (req) {
+		req->length = length;
+		req->buf = usb_ep_alloc_buffer (ep, length,
+				&req->dma, GFP_ATOMIC);
+		if (!req->buf) {
+			usb_ep_free_request (ep, req);
+			req = NULL;
+		}
+	}
+	return req;
+}
+
+static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->buf)
+		usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
+	usb_ep_free_request (ep, req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* optionally require specific source/sink data patterns  */
+
+static int
+check_read_data (
+	struct zero_dev		*dev,
+	struct usb_ep		*ep,
+	struct usb_request	*req
+)
+{
+	unsigned	i;
+	u8		*buf = req->buf;
+
+	for (i = 0; i < req->actual; i++, buf++) {
+		switch (pattern) {
+		/* all-zeroes has no synchronization issues */
+		case 0:
+			if (*buf == 0)
+				continue;
+			break;
+		/* mod63 stays in sync with short-terminated transfers,
+		 * or otherwise when host and gadget agree on how large
+		 * each usb transfer request should be.  resync is done
+		 * with set_interface or set_config.
+		 */
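+		/* (e.g. buf[0..62] hold the values 0..62, buf[63] is 0 again) */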
+		case 1:
+			if (*buf == (u8)(i % 63))
+				continue;
+			break;
+		}
+		ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf);
+		usb_ep_set_halt (ep);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+reinit_write_data (
+	struct zero_dev		*dev,
+	struct usb_ep		*ep,
+	struct usb_request	*req
+)
+{
+	unsigned	i;
+	u8		*buf = req->buf;
+
+	switch (pattern) {
+	case 0:
+		memset (req->buf, 0, req->length);
+		break;
+	case 1:
+		for  (i = 0; i < req->length; i++)
+			*buf++ = (u8) (i % 63);
+		break;
+	}
+}
+
+/* if there is only one request in the queue, there'll always be an
+ * irq delay between end of one request and start of the next.
+ * that prevents using hardware dma queues.
+ */
+static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct zero_dev	*dev = ep->driver_data;
+	int		status = req->status;
+
+	switch (status) {
+
+	case 0: 			/* normal completion? */
+		if (ep == dev->out_ep)
+			check_read_data (dev, ep, req);
+		else
+			reinit_write_data (dev, ep, req);
+		break;
+
+	/* this endpoint is normally active while we're configured */
+	case -ECONNABORTED: 		/* hardware forced ep reset */
+	case -ECONNRESET:		/* request dequeued */
+	case -ESHUTDOWN:		/* disconnect from host */
+		VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status,
+				req->actual, req->length);
+		if (ep == dev->out_ep)
+			check_read_data (dev, ep, req);
+		free_ep_req (ep, req);
+		return;
+
+	case -EOVERFLOW:		/* buffer overrun on read means that
+					 * we didn't provide a big enough
+					 * buffer.
+					 */
+	default:
+#if 1
+		DBG (dev, "%s complete --> %d, %d/%d\n", ep->name,
+				status, req->actual, req->length);
+#endif
+	case -EREMOTEIO:		/* short read */
+		break;
+	}
+
+	status = usb_ep_queue (ep, req, GFP_ATOMIC);
+	if (status) {
+		ERROR (dev, "kill %s:  resubmit %d bytes --> %d\n",
+				ep->name, req->length, status);
+		usb_ep_set_halt (ep);
+		/* FIXME recover later ... somehow */
+	}
+}
+
+static struct usb_request *
+source_sink_start_ep (struct usb_ep *ep, int gfp_flags)
+{
+	struct usb_request	*req;
+	int			status;
+
+	req = alloc_ep_req (ep, buflen);
+	if (!req)
+		return NULL;
+
+	memset (req->buf, 0, req->length);
+	req->complete = source_sink_complete;
+
+	if (strcmp (ep->name, EP_IN_NAME) == 0)
+		reinit_write_data (ep->driver_data, ep, req);
+
+	status = usb_ep_queue (ep, req, gfp_flags);
+	if (status) {
+		struct zero_dev	*dev = ep->driver_data;
+
+		ERROR (dev, "start %s --> %d\n", ep->name, status);
+		free_ep_req (ep, req);
+		req = NULL;
+	}
+
+	return req;
+}
+
+static int
+set_source_sink_config (struct zero_dev *dev, int gfp_flags)
+{
+	int			result = 0;
+	struct usb_ep		*ep;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	gadget_for_each_ep (ep, gadget) {
+		const struct usb_endpoint_descriptor	*d;
+
+		/* one endpoint writes (sources) zeroes in (to the host) */
+		if (strcmp (ep->name, EP_IN_NAME) == 0) {
+			d = ep_desc (gadget, &hs_source_desc, &fs_source_desc);
+			result = usb_ep_enable (ep, d);
+			if (result == 0) {
+				ep->driver_data = dev;
+				if (source_sink_start_ep (ep, gfp_flags) != 0) {
+					dev->in_ep = ep;
+					continue;
+				}
+				usb_ep_disable (ep);
+				result = -EIO;
+			}
+
+		/* one endpoint reads (sinks) anything out (from the host) */
+		} else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
+			d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc);
+			result = usb_ep_enable (ep, d);
+			if (result == 0) {
+				ep->driver_data = dev;
+				if (source_sink_start_ep (ep, gfp_flags) != 0) {
+					dev->out_ep = ep;
+					continue;
+				}
+				usb_ep_disable (ep);
+				result = -EIO;
+			}
+
+		/* ignore any other endpoints */
+		} else
+			continue;
+
+		/* stop on error */
+		ERROR (dev, "can't start %s, result %d\n", ep->name, result);
+		break;
+	}
+	if (result == 0)
+		DBG (dev, "buflen %d\n", buflen);
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct zero_dev	*dev = ep->driver_data;
+	int		status = req->status;
+
+	switch (status) {
+
+	case 0: 			/* normal completion? */
+		if (ep == dev->out_ep) {
+			/* loop this OUT packet back IN to the host */
+			req->zero = (req->actual < req->length);
+			req->length = req->actual;
+			status = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
+			if (status == 0)
+				return;
+
+			/* "should never get here" */
+			ERROR (dev, "can't loop %s to %s: %d\n",
+				ep->name, dev->in_ep->name,
+				status);
+		}
+
+		/* queue the buffer for some later OUT packet */
+		req->length = buflen;
+		status = usb_ep_queue (dev->out_ep, req, GFP_ATOMIC);
+		if (status == 0)
+			return;
+
+		/* "should never get here" */
+		/* FALLTHROUGH */
+
+	default:
+		ERROR (dev, "%s loop complete --> %d, %d/%d\n", ep->name,
+				status, req->actual, req->length);
+		/* FALLTHROUGH */
+
+	/* NOTE:  since this driver doesn't maintain an explicit record
+	 * of requests it submitted (just maintains qlen count), we
+	 * rely on the hardware driver to clean up on disconnect or
+	 * endpoint disable.
+	 */
+	case -ECONNABORTED: 		/* hardware forced ep reset */
+	case -ECONNRESET:		/* request dequeued */
+	case -ESHUTDOWN:		/* disconnect from host */
+		free_ep_req (ep, req);
+		return;
+	}
+}
+
+static int
+set_loopback_config (struct zero_dev *dev, int gfp_flags)
+{
+	int			result = 0;
+	struct usb_ep		*ep;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	gadget_for_each_ep (ep, gadget) {
+		const struct usb_endpoint_descriptor	*d;
+
+		/* one endpoint writes data back IN to the host */
+		if (strcmp (ep->name, EP_IN_NAME) == 0) {
+			d = ep_desc (gadget, &hs_source_desc, &fs_source_desc);
+			result = usb_ep_enable (ep, d);
+			if (result == 0) {
+				ep->driver_data = dev;
+				dev->in_ep = ep;
+				continue;
+			}
+
+		/* one endpoint just reads OUT packets */
+		} else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
+			d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc);
+			result = usb_ep_enable (ep, d);
+			if (result == 0) {
+				ep->driver_data = dev;
+				dev->out_ep = ep;
+				continue;
+			}
+
+		/* ignore any other endpoints */
+		} else
+			continue;
+
+		/* stop on error */
+		ERROR (dev, "can't enable %s, result %d\n", ep->name, result);
+		break;
+	}
+
+	/* allocate a bunch of read buffers and queue them all at once.
+	 * we buffer at most 'qlen' transfers; fewer if any need more
+	 * than 'buflen' bytes each.
+	 */
+	if (result == 0) {
+		struct usb_request	*req;
+		unsigned		i;
+
+		ep = dev->out_ep;
+		for (i = 0; i < qlen && result == 0; i++) {
+			req = alloc_ep_req (ep, buflen);
+			if (req) {
+				req->complete = loopback_complete;
+				result = usb_ep_queue (ep, req, GFP_ATOMIC);
+				if (result)
+					DBG (dev, "%s queue req --> %d\n",
+							ep->name, result);
+			} else
+				result = -ENOMEM;
+		}
+	}
+	if (result == 0)
+		DBG (dev, "qlen %d, buflen %d\n", qlen, buflen);
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void zero_reset_config (struct zero_dev *dev)
+{
+	if (dev->config == 0)
+		return;
+
+	DBG (dev, "reset config\n");
+
+	/* just disable endpoints, forcing completion of pending i/o.
+	 * all our completion handlers free their requests in this case.
+	 */
+	if (dev->in_ep) {
+		usb_ep_disable (dev->in_ep);
+		dev->in_ep = NULL;
+	}
+	if (dev->out_ep) {
+		usb_ep_disable (dev->out_ep);
+		dev->out_ep = NULL;
+	}
+	dev->config = 0;
+	del_timer (&dev->resume);
+}
+
+/* change our operational config.  this code must agree with the code
+ * that returns config descriptors, and altsetting code.
+ *
+ * it's also responsible for power management interactions. some
+ * configurations might not work with our current power sources.
+ *
+ * note that some device controller hardware will constrain what this
+ * code can do, perhaps by disallowing more than one configuration or
+ * by limiting configuration choices (like the pxa2xx).
+ */
+static int
+zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags)
+{
+	int			result = 0;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	if (number == dev->config)
+		return 0;
+
+	if (gadget_is_sa1100 (gadget) && dev->config) {
+		/* tx fifo is full, but we can't clear it...*/
+		INFO (dev, "can't change configurations\n");
+		return -ESPIPE;
+	}
+	zero_reset_config (dev);
+
+	switch (number) {
+	case CONFIG_SOURCE_SINK:
+		result = set_source_sink_config (dev, gfp_flags);
+		break;
+	case CONFIG_LOOPBACK:
+		result = set_loopback_config (dev, gfp_flags);
+		break;
+	default:
+		result = -EINVAL;
+		/* FALL THROUGH */
+	case 0:
+		return result;
+	}
+
+	if (!result && (!dev->in_ep || !dev->out_ep))
+		result = -ENODEV;
+	if (result)
+		zero_reset_config (dev);
+	else {
+		char *speed;
+
+		switch (gadget->speed) {
+		case USB_SPEED_LOW:	speed = "low"; break;
+		case USB_SPEED_FULL:	speed = "full"; break;
+		case USB_SPEED_HIGH:	speed = "high"; break;
+		default: 		speed = "?"; break;
+		}
+
+		dev->config = number;
+		INFO (dev, "%s speed config #%d: %s\n", speed, number,
+				(number == CONFIG_SOURCE_SINK)
+					? source_sink : loopback);
+	}
+	return result;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length)
+		DBG ((struct zero_dev *) ep->driver_data,
+				"setup complete --> %d, %d/%d\n",
+				req->status, req->actual, req->length);
+}
+
+/*
+ * The setup() callback implements all the ep0 functionality that's
+ * not handled lower down, in hardware or the hardware driver (like
+ * device and endpoint feature flags, and their status).  It's all
+ * housekeeping for the gadget function we're implementing.  Most of
+ * the work is in config-specific setup.
+ */
+static int
+zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct zero_dev		*dev = get_gadget_data (gadget);
+	struct usb_request	*req = dev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = ctrl->wIndex;
+	u16			w_value = ctrl->wValue;
+	u16			w_length = ctrl->wLength;
+
+	/* usually this stores reply data in the pre-allocated ep0 buffer,
+	 * but config change events will reconfigure hardware.
+	 */
+	req->zero = 0;
+	switch (ctrl->bRequest) {
+
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unknown;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			value = min (w_length, (u16) sizeof device_desc);
+			memcpy (req->buf, &device_desc, value);
+			break;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!gadget->is_dualspeed)
+				break;
+			value = min (w_length, (u16) sizeof dev_qualifier);
+			memcpy (req->buf, &dev_qualifier, value);
+			break;
+
+		case USB_DT_OTHER_SPEED_CONFIG:
+			if (!gadget->is_dualspeed)
+				break;
+			// FALLTHROUGH
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+		case USB_DT_CONFIG:
+			value = config_buf (gadget, req->buf,
+					w_value >> 8,
+					w_value & 0xff);
+			if (value >= 0)
+				value = min (w_length, (u16) value);
+			break;
+
+		case USB_DT_STRING:
+			/* wIndex == language code.
+			 * this driver only handles one language, you can
+			 * add string tables for other languages, using
+			 * any UTF-8 characters
+			 */
+			value = usb_gadget_get_string (&stringtab,
+					w_value & 0xff, req->buf);
+			if (value >= 0)
+				value = min (w_length, (u16) value);
+			break;
+		}
+		break;
+
+	/* currently two configs, two speeds */
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != 0)
+			goto unknown;
+		if (gadget->a_hnp_support)
+			DBG (dev, "HNP available\n");
+		else if (gadget->a_alt_hnp_support)
+			DBG (dev, "HNP needs a different root port\n");
+		else
+			VDBG (dev, "HNP inactive\n");
+		spin_lock (&dev->lock);
+		value = zero_set_config (dev, w_value, GFP_ATOMIC);
+		spin_unlock (&dev->lock);
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unknown;
+		*(u8 *)req->buf = dev->config;
+		value = min (w_length, (u16) 1);
+		break;
+
+	/* until we add altsetting support, or other interfaces,
+	 * only 0/0 are possible.  pxa2xx only supports 0/0 (poorly)
+	 * and already killed pending endpoint I/O.
+	 */
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
+			goto unknown;
+		spin_lock (&dev->lock);
+		if (dev->config && w_index == 0 && w_value == 0) {
+			u8		config = dev->config;
+
+			/* resets interface configuration, forgets about
+			 * previous transaction state (queued bufs, etc)
+			 * and re-inits endpoint state (toggle etc)
+			 * no response queued, just zero status == success.
+			 * if we had more than one interface we couldn't
+			 * use this "reset the config" shortcut.
+			 */
+			zero_reset_config (dev);
+			zero_set_config (dev, config, GFP_ATOMIC);
+			value = 0;
+		}
+		spin_unlock (&dev->lock);
+		break;
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
+			goto unknown;
+		if (!dev->config)
+			break;
+		if (w_index != 0) {
+			value = -EDOM;
+			break;
+		}
+		*(u8 *)req->buf = 0;
+		value = min (w_length, (u16) 1);
+		break;
+
+	/*
+	 * These are the same vendor-specific requests supported by
+	 * Intel's USB 2.0 compliance test devices.  We exceed that
+	 * device spec by allowing multiple-packet requests.
+	 */
+	case 0x5b:	/* control WRITE test -- fill the buffer */
+		if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
+			goto unknown;
+		if (w_value || w_index)
+			break;
+		/* just read that many bytes into the buffer */
+		if (w_length > USB_BUFSIZ)
+			break;
+		value = w_length;
+		break;
+	case 0x5c:	/* control READ test -- return the buffer */
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
+			goto unknown;
+		if (w_value || w_index)
+			break;
+		/* expect those bytes are still in the buffer; send back */
+		if (w_length > USB_BUFSIZ
+				|| w_length != req->length)
+			break;
+		value = w_length;
+		break;
+
+	default:
+unknown:
+		VDBG (dev,
+			"unknown control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer before status phase? */
+	if (value >= 0) {
+		req->length = value;
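+		/* replies shorter than the host asked for must end with a
+		 * short packet; req->zero asks for a trailing zero length
+		 * packet when the data is an exact multiple of maxpacket
+		 */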
+		req->zero = value < w_length;
+		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG (dev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			zero_setup_complete (gadget->ep0, req);
+		}
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static void
+zero_disconnect (struct usb_gadget *gadget)
+{
+	struct zero_dev		*dev = get_gadget_data (gadget);
+	unsigned long		flags;
+
+	spin_lock_irqsave (&dev->lock, flags);
+	zero_reset_config (dev);
+
+	/* a more significant application might have some non-usb
+	 * activities to quiesce here, saving resources like power
+	 * or pushing the notification up a network stack.
+	 */
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* next we may get setup() calls to enumerate new connections;
+	 * or an unbind() during shutdown (including removing module).
+	 */
+}
+
+static void
+zero_autoresume (unsigned long _dev)
+{
+	struct zero_dev	*dev = (struct zero_dev *) _dev;
+	int		status;
+
+	/* normally the host would be woken up for something
+	 * more significant than just a timer firing...
+	 */
+	if (dev->gadget->speed != USB_SPEED_UNKNOWN) {
+		status = usb_gadget_wakeup (dev->gadget);
+		DBG (dev, "wakeup --> %d\n", status);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+zero_unbind (struct usb_gadget *gadget)
+{
+	struct zero_dev		*dev = get_gadget_data (gadget);
+
+	DBG (dev, "unbind\n");
+
+	/* we've already been disconnected ... no i/o is active */
+	if (dev->req)
+		free_ep_req (gadget->ep0, dev->req);
+	del_timer_sync (&dev->resume);
+	kfree (dev);
+	set_gadget_data (gadget, NULL);
+}
+
+static int
+zero_bind (struct usb_gadget *gadget)
+{
+	struct zero_dev		*dev;
+	struct usb_ep		*ep;
+
+	/* Bulk-only drivers like this one SHOULD be able to
+	 * autoconfigure on any sane usb controller driver,
+	 * but there may also be important quirks to address.
+	 */
+	usb_ep_autoconfig_reset (gadget);
+	ep = usb_ep_autoconfig (gadget, &fs_source_desc);
+	if (!ep) {
+autoconf_fail:
+		printk (KERN_ERR "%s: can't autoconfigure on %s\n",
+			shortname, gadget->name);
+		return -ENODEV;
+	}
+	EP_IN_NAME = ep->name;
+	ep->driver_data = ep;	/* claim */
+	
+	ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
+	if (!ep)
+		goto autoconf_fail;
+	EP_OUT_NAME = ep->name;
+	ep->driver_data = ep;	/* claim */
+
+
+	/*
+	 * DRIVER POLICY CHOICE:  you may want to do this differently.
+	 * One thing to avoid is reusing a bcdDevice revision code
+	 * with different host-visible configurations or behavior
+	 * restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc
+	 */
+	if (gadget_is_net2280 (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
+	} else if (gadget_is_pxa (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0203);
+#if 0
+	} else if (gadget_is_sh(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0204);
+		/* SH has only one configuration; see "loopdefault" */
+		device_desc.bNumConfigurations = 1;
+		/* FIXME make 1 == default.bConfigurationValue */
+#endif
+	} else if (gadget_is_sa1100 (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0205);
+	} else if (gadget_is_goku (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0206);
+	} else if (gadget_is_mq11xx (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0207);
+	} else if (gadget_is_omap (gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
+	} else if (gadget_is_lh7a40x(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
+	} else if (gadget_is_n9604(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0210);
+	} else if (gadget_is_pxa27x(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0211);
+	} else if (gadget_is_s3c2410(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0212);
+	} else if (gadget_is_at91(gadget)) {
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x0213);
+	} else {
+		/* gadget zero is so simple (for now, no altsettings) that
+		 * it SHOULD NOT have problems with bulk-capable hardware.
+		 * so warn about unrecognized controllers, don't panic.
+		 *
+		 * things like configuration and altsetting numbering
+		 * can need hardware-specific attention though.
+		 */
+		printk (KERN_WARNING "%s: controller '%s' not recognized\n",
+			shortname, gadget->name);
+		device_desc.bcdDevice = __constant_cpu_to_le16 (0x9999);
+	}
+
+
+	/* ok, we made sense of the hardware ... */
+	dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	memset (dev, 0, sizeof *dev);
+	spin_lock_init (&dev->lock);
+	dev->gadget = gadget;
+	set_gadget_data (gadget, dev);
+
+	/* preallocate control response and buffer */
+	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
+	if (!dev->req)
+		goto enomem;
+	dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ,
+				&dev->req->dma, GFP_KERNEL);
+	if (!dev->req->buf)
+		goto enomem;
+
+	dev->req->complete = zero_setup_complete;
+
+	device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	/* assume ep0 uses the same value for both speeds ... */
+	dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
+
+	/* and that all endpoints are dual-speed */
+	hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
+	hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
+#endif
+
+	if (gadget->is_otg) {
+		otg_descriptor.bmAttributes |= USB_OTG_HNP;
+		source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+		loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	usb_gadget_set_selfpowered (gadget);
+
+	init_timer (&dev->resume);
+	dev->resume.function = zero_autoresume;
+	dev->resume.data = (unsigned long) dev;
+	if (autoresume) {
+		source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+		loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	gadget->ep0->driver_data = dev;
+
+	INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname);
+	INFO (dev, "using %s, OUT %s IN %s\n", gadget->name,
+		EP_OUT_NAME, EP_IN_NAME);
+
+	snprintf (manufacturer, sizeof manufacturer, "%s %s with %s",
+		system_utsname.sysname, system_utsname.release,
+		gadget->name);
+
+	return 0;
+
+enomem:
+	zero_unbind (gadget);
+	return -ENOMEM;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+zero_suspend (struct usb_gadget *gadget)
+{
+	struct zero_dev		*dev = get_gadget_data (gadget);
+
+	if (gadget->speed == USB_SPEED_UNKNOWN)
+		return;
+
+	if (autoresume) {
+		mod_timer (&dev->resume, jiffies + (HZ * autoresume));
+		DBG (dev, "suspend, wakeup in %d seconds\n", autoresume);
+	} else
+		DBG (dev, "suspend\n");
+}
+
+static void
+zero_resume (struct usb_gadget *gadget)
+{
+	struct zero_dev		*dev = get_gadget_data (gadget);
+
+	DBG (dev, "resume\n");
+	del_timer (&dev->resume);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver zero_driver = {
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	.speed		= USB_SPEED_HIGH,
+#else
+	.speed		= USB_SPEED_FULL,
+#endif
+	.function	= (char *) longname,
+	.bind		= zero_bind,
+	.unbind		= zero_unbind,
+
+	.setup		= zero_setup,
+	.disconnect	= zero_disconnect,
+
+	.suspend	= zero_suspend,
+	.resume		= zero_resume,
+
+	.driver 	= {
+		.name		= (char *) shortname,
+		// .shutdown = ...
+		// .suspend = ...
+		// .resume = ...
+	},
+};
+
+MODULE_AUTHOR ("David Brownell");
+MODULE_LICENSE ("Dual BSD/GPL");
+
+
+static int __init init (void)
+{
+	/* a real value would likely come through some id prom
+	 * or module option.  this one takes at least two packets.
+	 */
+	strlcpy (serial, "0123456789.0123456789.0123456789", sizeof serial);
+
+	return usb_gadget_register_driver (&zero_driver);
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	usb_gadget_unregister_driver (&zero_driver);
+}
+module_exit (cleanup);
+