/*
 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
 *
 * Copyright (C) 2000-2002 Lineo
 *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
 * Copyright (C) 2002 Toshiba Corporation
 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/*
 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
 *
 *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
 *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
 *  - Gadget drivers can choose direction (IN, OUT)
 *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
 */

#undef DEBUG
// #define	VERBOSE		/* extra debug messages (success too) */
// #define	USB_TRACE	/* packet-level success messages */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb_gadget.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>


#include "goku_udc.h"

#define	DRIVER_DESC		"TC86C001 USB Device Controller"
#define	DRIVER_VERSION		"30-Oct 2003"

#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)

static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;

MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");


/*
 * IN dma behaves ok under testing, though the IN-dma abort paths don't
 * seem to behave quite as expected.  Used by default.
 *
 * OUT dma documents design problems handling the common "short packet"
 * transfer termination policy; it couldn't be enabled by default, even
 * if the OUT-dma abort problems had a resolution.
 */
static unsigned use_dma = 1;

#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
 *	0 to disable dma
 *	1 to use IN dma only (normal operation)
 *	2 to use IN and OUT dma
 */
module_param(use_dma, uint, S_IRUGO);
#endif
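/* With the module_param() block above compiled out (#if 0), use_dma is
 * effectively fixed at 1 (IN dma only) unless this file is edited.
 */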

/*-------------------------------------------------------------------------*/

static void nuke(struct goku_ep *, int status);

static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}

static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->num != (desc->bEndpointAddress & 0x0f))
		return -EINVAL;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
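	/* The deliberate switch fall-through below packs maxpacket into bits
	 * 4:3 of the mode value (0 = 8 bytes ... 3 = 64 bytes); bits 2:1 get
	 * the transfer type and bit 0 the direction, both set further down.
	 */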
	switch (max) {
	case 64:	mode++;
	case 32:	mode++;
	case 16:	mode++;
	case 8:		mode <<= 3;
			break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */

	/* ep1/ep2 dma direction is chosen early; it works in the other
	 * direction, with pio.  be cautious with out-dma.
	 */
	ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10	/* double buffered */
				: 0x11	/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}
	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}
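/* Caller-side sketch (hypothetical descriptor name): a gadget driver picks
 * one of ep1..ep3, fills a bulk or interrupt descriptor whose endpoint
 * address matches that ep's fixed number, and calls
 * usb_ep_enable(ep, &my_bulk_desc) before queueing any requests.
 */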

static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		readl(&regs->int_enable);
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32				tmp;

			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	ep->ep.maxpacket = MAX_FIFO_SIZE;
	ep->desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}

static int goku_ep_disable(struct usb_ep *_ep)
{
	struct goku_ep	*ep;
	struct goku_udc	*dev;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !ep->desc)
		return -ENODEV;
	dev = ep->dev;
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "disable %s\n", _ep->name);

	spin_lock_irqsave(&dev->lock, flags);
	nuke(ep, -ESHUTDOWN);
	ep_reset(dev->regs, ep);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct goku_request	*req;

	if (!_ep)
		return NULL;
	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}

static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct goku_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

/*-------------------------------------------------------------------------*/

#undef USE_KMALLOC

/* many common platforms have dma-coherent caches, which means that it's
 * safe to use kmalloc() memory for all i/o buffers without using any
 * cache flushing calls.  (unless you're trying to share cache lines
 * between dma and non-dma activities, which is a slow idea in any case.)
 *
 * other platforms need more care, with 2.6 having a moderately general
 * solution except for the common "buffer is smaller than a page" case.
 */
#if	defined(CONFIG_X86)
#define USE_KMALLOC

#elif	defined(CONFIG_MIPS) && !defined(CONFIG_DMA_NONCOHERENT)
#define USE_KMALLOC

#elif	defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
#define USE_KMALLOC

#endif

/* allocating buffers this way eliminates dma mapping overhead, which
 * on some platforms will mean eliminating a per-io buffer copy.  with
 * some kinds of system caches, further tweaks may still be needed.
 */
static void *
goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
			dma_addr_t *dma, gfp_t gfp_flags)
{
	void		*retval;
	struct goku_ep	*ep;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep)
		return NULL;
	*dma = DMA_ADDR_INVALID;

#if	defined(USE_KMALLOC)
	retval = kmalloc(bytes, gfp_flags);
	if (retval)
		*dma = virt_to_phys(retval);
#else
	if (ep->dma) {
		/* the main problem with this call is that it wastes memory
		 * on typical 1/N page allocations: it allocates 1-N pages.
		 */
#warning Using dma_alloc_coherent even with buffers smaller than a page.
		retval = dma_alloc_coherent(&ep->dev->pdev->dev,
				bytes, dma, gfp_flags);
	} else
		retval = kmalloc(bytes, gfp_flags);
#endif
	return retval;
}

static void
goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
{
	/* free memory into the right allocator */
#ifndef	USE_KMALLOC
	if (dma != DMA_ADDR_INVALID) {
		struct goku_ep	*ep;

		ep = container_of(_ep, struct goku_ep, ep);
		if (!_ep)
			return;
		dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
	} else
#endif
		kfree (buf);
}

/*-------------------------------------------------------------------------*/

static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->mapped) {
		pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

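/* Each endpoint FIFO is accessed one byte per 32-bit register write (or
 * read, on the OUT side), so PIO moves the request buffer a byte at a time.
 */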
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
	unsigned	length, count;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	count = length;
	while (likely(count--))
		writel(*buf++, fifo);
	return length;
}

// return:  0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc	*dev = ep->dev;
	u32		tmp;
	u8		*buf;
	unsigned	count;
	int		is_last;

	tmp = readl(&dev->regs->DataSet);
	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	dev = ep->dev;
	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
		return -EL2HLT;

	/* NOTE:  just single-buffered PIO-IN for now.  */
	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
		return 0;

	/* clear our "packet available" irq */
	if (ep->num != 0)
		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);

	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);

	/* last packet often short (sometimes a zlp, especially on ep0) */
	if (unlikely(count != ep->ep.maxpacket)) {
		writel(~(1<<ep->num), &dev->regs->EOP);
		if (ep->num == 0) {
			dev->ep[0].stopped = 1;
			dev->ep0state = EP0_STATUS;
		}
		is_last = 1;
	} else {
		if (likely(req->req.length != req->req.actual)
				|| req->req.zero)
			is_last = 0;
		else
			is_last = 1;
	}
#if 0		/* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
		ep->ep.name, count, is_last ? "/last" : "",
		req->req.length - req->req.actual, req);
#endif

	/* requests complete when all IN data is in the FIFO,
	 * or sometimes later, if a zlp was needed.
	 */
	if (is_last) {
		done(ep, req, 0);
		return 1;
	}

	return 0;
}

static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	// "can't happen"
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}

static inline void
pio_irq_enable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable |= INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

static inline void
pio_irq_disable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable &= ~INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

static inline void
pio_advance(struct goku_ep *ep)
{
	struct goku_request	*req;

	if (unlikely(list_empty (&ep->queue)))
		return;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(ep->is_in ? write_fifo : read_fifo)(ep, req);
}


/*-------------------------------------------------------------------------*/

// return:  0 = q running, 1 = q stopped, negative = errno
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master = MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master = MST_RD_ENA | MST_EOPB_ENA;
		else
			master = MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;

		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}

static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
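	/* the *_dma_current registers hold the address of the last byte the
	 * engine moved, hence the subtract-and-increment byte count below
	 */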
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}

static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	struct goku_request		*req;
	u32				curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);
	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* FIXME using these resets isn't usably documented. this may
	 * not work unless it's followed by disabling the endpoint.
	 *
	 * FIXME the OUT reset path doesn't even behave consistently.
	 */
	if (ep->is_in) {
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);

	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}

/*-------------------------------------------------------------------------*/

static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->mapped = 1;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		if (unlikely(status != 0)) {
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */

	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}
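/* Rough caller-side sketch (names hypothetical): a gadget driver allocates
 * a request with usb_ep_alloc_request(), fills req->buf, req->length and
 * req->complete, then submits it with usb_ep_queue(ep, req, GFP_ATOMIC);
 * the completion callback later runs with dev->lock dropped (see done()).
 */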

/* dequeue ALL requests */
static void nuke(struct goku_ep *ep, int status)
{
	struct goku_request	*req;

	ep->stopped = 1;
	if (list_empty(&ep->queue))
		return;
	if (ep->dma)
		abort_dma(ep, status);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct goku_request, queue);
		done(ep, req, status);
	}
}

/* dequeue JUST ONE request */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !_req || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* we can't touch (dma) registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		_req);

	spin_lock_irqsave(&dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&dev->lock, flags);
		return -EINVAL;
	}

	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
		abort_dma(ep, -ECONNRESET);
		done(ep, req, -ECONNRESET);
		dma_advance(dev, ep);
	} else if (!list_empty(&req->queue))
		done(ep, req, -ECONNRESET);
	else
		req = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return req ? 0 : -EOPNOTSUPP;
}

/*-------------------------------------------------------------------------*/

static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
	if (ep->stopped) {
		ep->stopped = 0;
		if (ep->dma) {
			struct goku_request	*req;

			if (list_empty(&ep->queue))
				return;
			req = list_entry(ep->queue.next, struct goku_request,
						queue);
			(void) start_dma(ep, req);
		} else
			pio_advance(ep);
	}
}

static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep	*ep;
	unsigned long	flags;
	int		retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->desc) {
		DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
				& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		readl(ep->reg_status);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}

static int goku_fifo_status(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct goku_ep, ep);

	/* size is only reported sanely for OUT */
	if (ep->is_in)
		return -EOPNOTSUPP;

	/* ignores 16-byte dma buffer; SizeH == 0 */
	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
	VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
	return size;
}

static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Non-desirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for in, no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}

static struct usb_ep_ops goku_ep_ops = {
	.enable		= goku_ep_enable,
	.disable	= goku_ep_disable,

	.alloc_request	= goku_alloc_request,
	.free_request	= goku_free_request,

	.alloc_buffer	= goku_alloc_buffer,
	.free_buffer	= goku_free_buffer,

	.queue		= goku_queue,
	.dequeue	= goku_dequeue,

	.set_halt	= goku_set_halt,
	.fifo_status	= goku_fifo_status,
	.fifo_flush	= goku_fifo_flush,
};
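/* usb_ep_enable(), usb_ep_queue(), and the other usb_ep_* wrappers in the
 * gadget API dispatch through this table.
 */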

/*-------------------------------------------------------------------------*/

static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}

static const struct usb_gadget_ops goku_ops = {
	.get_frame	= goku_get_frame,
	// no remote wakeup
	// not selfpowered
};

/*-------------------------------------------------------------------------*/

static inline char *dmastr(void)
{
	if (use_dma == 0)
		return "(dma disabled)";
	else if (use_dma == 2)
		return "(dma IN and OUT)";
	else
		return "(dma IN)";
}

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static const char proc_node_name [] = "driver/udc";

#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS

static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
	int t;

	/* int_status is the same format ... */
	t = scnprintf(*next, *size,
		"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		label, mask,
		(mask & INT_PWRDETECT) ? " power" : "",
		(mask & INT_SYSERROR) ? " sys" : "",
		(mask & INT_MSTRDEND) ? " in-dma" : "",
		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

		(mask & INT_MSTWREND) ? " out-dma" : "",
		(mask & INT_MSTWRSET) ? " wrset" : "",
		(mask & INT_ERR) ? " err" : "",
		(mask & INT_SOF) ? " sof" : "",

		(mask & INT_EP3NAK) ? " ep3nak" : "",
		(mask & INT_EP2NAK) ? " ep2nak" : "",
		(mask & INT_EP1NAK) ? " ep1nak" : "",
		(mask & INT_EP3DATASET) ? " ep3" : "",

		(mask & INT_EP2DATASET) ? " ep2" : "",
		(mask & INT_EP1DATASET) ? " ep1" : "",
		(mask & INT_STATUSNAK) ? " ep0snak" : "",
		(mask & INT_STATUS) ? " ep0status" : "",

		(mask & INT_SETUP) ? " setup" : "",
		(mask & INT_ENDPOINT0) ? " ep0" : "",
		(mask & INT_USBRESET) ? " reset" : "",
		(mask & INT_SUSPEND) ? " suspend" : "");
	*size -= t;
	*next += t;
}


static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char				*buf = buffer;
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	char				*next = buf;
	unsigned			size = count;
	unsigned long			flags;
	int				i, t, is_usb_connected;
	u32				tmp;

	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	t = scnprintf(next, size,
		"%s - %s\n"
		"%s version: %s %s\n"
		"Gadget driver: %s\n"
		"Host %s, %s\n"
		"\n",
		pci_name(dev->pdev), driver_desc,
		driver_name, DRIVER_VERSION, dmastr(),
		dev->driver ? dev->driver->driver.name : "(none)",
		is_usb_connected
			? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			: "disconnected",
		({char *tmp;
		switch(dev->ep0state){
		case EP0_DISCONNECT:	tmp = "ep0_disconnect"; break;
		case EP0_IDLE:		tmp = "ep0_idle"; break;
		case EP0_IN:		tmp = "ep0_in"; break;
		case EP0_OUT:		tmp = "ep0_out"; break;
		case EP0_STATUS:	tmp = "ep0_status"; break;
		case EP0_STALL:		tmp = "ep0_stall"; break;
		case EP0_SUSPEND:	tmp = "ep0_suspend"; break;
		default:		tmp = "ep0_?"; break;
		} tmp; })
		);
	size -= t;
	next += t;

	dump_intmask("int_status", readl(&regs->int_status), &next, &size);
	dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);

	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
		"single.bcs %02x.%02x state %x addr %u\n",
		dev->irqs, readl(&regs->DataSet),
		readl(&regs->EPxSingle), readl(&regs->EPxBCS),
		readl(&regs->UsbState),
		readl(&regs->address));
	size -= t;
	next += t;

	tmp = readl(&regs->dma_master);
	t = scnprintf(next, size,
		"dma %03X =" EIGHTBITS "%s %s\n", tmp,
		(tmp & MST_EOPB_DIS) ? " eopb-" : "",
		(tmp & MST_EOPB_ENA) ? " eopb+" : "",
		(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

		(tmp & MST_RD_EOPB) ? " eopb" : "",
		(tmp & MST_RD_RESET) ? " in_reset" : "",
		(tmp & MST_WR_RESET) ? " out_reset" : "",
		(tmp & MST_RD_ENA) ? " IN" : "",

		(tmp & MST_WR_ENA) ? " OUT" : "",
		(tmp & MST_CONNECTION)
			? "ep1in/ep2out"
			: "ep1out/ep2in");
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep		*ep = &dev->ep [i];
		struct goku_request	*req;
		int			t;

		if (i && !ep->desc)
			continue;

		tmp = readl(ep->reg_status);
		t = scnprintf(next, size,
			"%s %s max %u %s, irqs %lu, "
			"status %02x (%s) " FOURBITS "\n",
			ep->ep.name,
			ep->is_in ? "in" : "out",
			ep->ep.maxpacket,
			ep->dma ? "dma" : "pio",
			ep->irqs,
			tmp, ({ char *s;
			switch (tmp & EPxSTATUS_EP_MASK) {
			case EPxSTATUS_EP_READY:
				s = "ready"; break;
			case EPxSTATUS_EP_DATAIN:
				s = "packet"; break;
			case EPxSTATUS_EP_FULL:
				s = "full"; break;
			case EPxSTATUS_EP_TX_ERR:	// host will retry
				s = "tx_err"; break;
			case EPxSTATUS_EP_RX_ERR:
				s = "rx_err"; break;
			case EPxSTATUS_EP_BUSY:		/* ep0 only */
				s = "busy"; break;
			case EPxSTATUS_EP_STALL:
				s = "stall"; break;
			case EPxSTATUS_EP_INVALID:	// these "can't happen"
				s = "invalid"; break;
			default:
				s = "?"; break;
			}; s; }),
			(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
			);
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			t = scnprintf(next, size,
				"\treq %p len %u/%u buf %p\n",
				&req->req, tmp, req->req.length,
				req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}

#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/*-------------------------------------------------------------------------*/

static void udc_reinit (struct goku_udc *dev)
{
	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };

	unsigned i;

	INIT_LIST_HEAD (&dev->gadget.ep_list);
	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->ep0state = EP0_DISCONNECT;
	dev->irqs = 0;

	for (i = 0; i < 4; i++) {
		struct goku_ep	*ep = &dev->ep[i];

		ep->num = i;
		ep->ep.name = names[i];
		ep->reg_fifo = &dev->regs->ep_fifo [i];
		ep->reg_status = &dev->regs->ep_status [i];
		ep->reg_mode = &dev->regs->ep_mode[i];

		ep->ep.ops = &goku_ep_ops;
		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
		ep->dev = dev;
		INIT_LIST_HEAD (&ep->queue);

		ep_reset(NULL, ep);
	}

	dev->ep[0].reg_mode = NULL;
	dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
	list_del_init (&dev->ep[0].ep.ep_list);
}

static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;

	writel(0, &regs->power_detect);
	writel(0, &regs->int_enable);
	readl(&regs->int_enable);
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);
}

static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned			i;

	VDBG(dev, "%s\n", __FUNCTION__);

	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}

static void udc_enable(struct goku_udc *dev)
{
	/* start enumeration now, or after power detect irq */
	if (readl(&dev->regs->power_detect) & PW_DETECT)
		ep0_start(dev);
	else {
		DBG(dev, "%s\n", __FUNCTION__);
		dev->int_enable = INT_PWRDETECT;
		writel(dev->int_enable, &dev->regs->int_enable);
	}
}

/*-------------------------------------------------------------------------*/

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 */

static struct goku_udc	*the_controller;

/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
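/* A function driver built against this interface looks roughly like the
 * sketch below (names hypothetical); usb_gadget_register_driver() rejects
 * anything without full speed plus bind/setup/disconnect methods, and
 * unregistering requires an unbind method too.
 *
 *	static struct usb_gadget_driver my_function_driver = {
 *		.speed		= USB_SPEED_FULL,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= { .name = "my_function" },
 *	};
 */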
|  | 1427 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 
|  | 1428 | { | 
|  | 1429 | struct goku_udc	*dev = the_controller; | 
|  | 1430 | int			retval; | 
|  | 1431 |  | 
|  | 1432 | if (!driver | 
|  | 1433 | || driver->speed != USB_SPEED_FULL | 
|  | 1434 | || !driver->bind | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1435 | || !driver->disconnect | 
|  | 1436 | || !driver->setup) | 
|  | 1437 | return -EINVAL; | 
|  | 1438 | if (!dev) | 
|  | 1439 | return -ENODEV; | 
|  | 1440 | if (dev->driver) | 
|  | 1441 | return -EBUSY; | 
|  | 1442 |  | 
|  | 1443 | /* hook up the driver */ | 
|  | 1444 | driver->driver.bus = NULL; | 
|  | 1445 | dev->driver = driver; | 
|  | 1446 | dev->gadget.dev.driver = &driver->driver; | 
|  | 1447 | retval = driver->bind(&dev->gadget); | 
|  | 1448 | if (retval) { | 
|  | 1449 | DBG(dev, "bind to driver %s --> error %d\n", | 
|  | 1450 | driver->driver.name, retval); | 
|  | 1451 | dev->driver = NULL; | 
|  | 1452 | dev->gadget.dev.driver = NULL; | 
|  | 1453 | return retval; | 
|  | 1454 | } | 
|  | 1455 |  | 
|  | 1456 | /* then enable host detection and ep0; and we're ready | 
|  | 1457 | * for set_configuration as well as eventual disconnect. | 
|  | 1458 | */ | 
|  | 1459 | udc_enable(dev); | 
|  | 1460 |  | 
|  | 1461 | DBG(dev, "registered gadget driver '%s'\n", driver->driver.name); | 
|  | 1462 | return 0; | 
|  | 1463 | } | 
|  | 1464 | EXPORT_SYMBOL(usb_gadget_register_driver); | 
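|  |  |  | 
|  |  | /* Illustration only, not part of the original source: a minimal sketch of | 
|  |  |  * the gadget-driver side of the registration path above.  It assumes the | 
|  |  |  * 2.6-era usb_gadget_driver API this file exports; the names my_bind(), | 
|  |  |  * my_unbind(), my_setup(), my_disconnect() and "mini-gadget" are made up. | 
|  |  |  */ | 
|  |  | #if 0 | 
|  |  | static int my_bind(struct usb_gadget *gadget) | 
|  |  | { | 
|  |  | 	/* allocate per-gadget state and claim ep1/ep2/ep3 here */ | 
|  |  | 	return 0; | 
|  |  | } | 
|  |  |  | 
|  |  | static void my_unbind(struct usb_gadget *gadget) | 
|  |  | { | 
|  |  | 	/* release whatever my_bind() set up */ | 
|  |  | } | 
|  |  |  | 
|  |  | static int my_setup(struct usb_gadget *gadget, | 
|  |  | 		const struct usb_ctrlrequest *ctrl) | 
|  |  | { | 
|  |  | 	/* handle the control requests ep0_setup() delegates; a negative | 
|  |  | 	 * return value asks the UDC to protocol-stall ep0 | 
|  |  | 	 */ | 
|  |  | 	return -EOPNOTSUPP; | 
|  |  | } | 
|  |  |  | 
|  |  | static void my_disconnect(struct usb_gadget *gadget) | 
|  |  | { | 
|  |  | 	/* the host dropped the connection; abort queued requests */ | 
|  |  | } | 
|  |  |  | 
|  |  | static struct usb_gadget_driver mini_driver = { | 
|  |  | 	.speed		= USB_SPEED_FULL,	/* all this UDC supports */ | 
|  |  | 	.bind		= my_bind, | 
|  |  | 	.unbind		= my_unbind, | 
|  |  | 	.setup		= my_setup, | 
|  |  | 	.disconnect	= my_disconnect, | 
|  |  | 	.driver		= { .name = "mini-gadget" }, | 
|  |  | }; | 
|  |  |  | 
|  |  | static int __init mini_init(void) | 
|  |  | { | 
|  |  | 	/* fails with -EBUSY if another gadget driver is already bound */ | 
|  |  | 	return usb_gadget_register_driver(&mini_driver); | 
|  |  | } | 
|  |  | #endif | 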
|  | 1465 |  | 
|  | 1466 | static void | 
|  | 1467 | stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver) | 
|  | 1468 | { | 
|  | 1469 | unsigned	i; | 
|  | 1470 |  | 
|  | 1471 | DBG (dev, "%s\n", __FUNCTION__); | 
|  | 1472 |  | 
|  | 1473 | if (dev->gadget.speed == USB_SPEED_UNKNOWN) | 
|  | 1474 | driver = NULL; | 
|  | 1475 |  | 
|  | 1476 | /* disconnect gadget driver after quiescing hw and the driver */ | 
|  | 1477 | udc_reset (dev); | 
|  | 1478 | for (i = 0; i < 4; i++) | 
|  | 1479 | nuke(&dev->ep [i], -ESHUTDOWN); | 
|  | 1480 | if (driver) { | 
|  | 1481 | spin_unlock(&dev->lock); | 
|  | 1482 | driver->disconnect(&dev->gadget); | 
|  | 1483 | spin_lock(&dev->lock); | 
|  | 1484 | } | 
|  | 1485 |  | 
|  | 1486 | if (dev->driver) | 
|  | 1487 | udc_enable(dev); | 
|  | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 
|  | 1491 | { | 
|  | 1492 | struct goku_udc	*dev = the_controller; | 
|  | 1493 | unsigned long	flags; | 
|  | 1494 |  | 
|  | 1495 | if (!dev) | 
|  | 1496 | return -ENODEV; | 
| David Brownell | 6bea476 | 2006-12-05 03:15:33 -0800 | [diff] [blame] | 1497 | if (!driver || driver != dev->driver || !driver->unbind) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | return -EINVAL; | 
|  | 1499 |  | 
|  | 1500 | spin_lock_irqsave(&dev->lock, flags); | 
|  | 1501 | dev->driver = NULL; | 
|  | 1502 | stop_activity(dev, driver); | 
|  | 1503 | spin_unlock_irqrestore(&dev->lock, flags); | 
|  | 1504 |  | 
|  | 1505 | driver->unbind(&dev->gadget); | 
|  | 1506 |  | 
|  | 1507 | DBG(dev, "unregistered driver '%s'\n", driver->driver.name); | 
|  | 1508 | return 0; | 
|  | 1509 | } | 
|  | 1510 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | 
|  | 1511 |  | 
|  | 1512 |  | 
|  | 1513 | /*-------------------------------------------------------------------------*/ | 
|  | 1514 |  | 
|  | 1515 | static void ep0_setup(struct goku_udc *dev) | 
|  | 1516 | { | 
|  | 1517 | struct goku_udc_regs __iomem	*regs = dev->regs; | 
|  | 1518 | struct usb_ctrlrequest		ctrl; | 
|  | 1519 | int				tmp; | 
|  | 1520 |  | 
|  | 1521 | /* read SETUP packet and enter DATA stage */ | 
|  | 1522 | ctrl.bRequestType = readl(&regs->bRequestType); | 
|  | 1523 | ctrl.bRequest = readl(&regs->bRequest); | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1524 | ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8) | 
|  | 1525 | | readl(&regs->wValueL)); | 
|  | 1526 | ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8) | 
|  | 1527 | | readl(&regs->wIndexL)); | 
|  | 1528 | ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8) | 
|  | 1529 | | readl(&regs->wLengthL)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 | writel(0, &regs->SetupRecv); | 
|  | 1531 |  | 
|  | 1532 | nuke(&dev->ep[0], 0); | 
|  | 1533 | dev->ep[0].stopped = 0; | 
|  | 1534 | if (likely(ctrl.bRequestType & USB_DIR_IN)) { | 
|  | 1535 | dev->ep[0].is_in = 1; | 
|  | 1536 | dev->ep0state = EP0_IN; | 
|  | 1537 | /* detect early status stages */ | 
|  | 1538 | writel(ICONTROL_STATUSNAK, &dev->regs->IntControl); | 
|  | 1539 | } else { | 
|  | 1540 | dev->ep[0].is_in = 0; | 
|  | 1541 | dev->ep0state = EP0_OUT; | 
|  | 1542 |  | 
|  | 1543 | /* NOTE:  CLEAR_FEATURE is done in software so that we can | 
|  | 1544 | * synchronize transfer restarts after bulk IN stalls.  data | 
|  | 1545 | * won't even enter the fifo until the halt is cleared. | 
|  | 1546 | */ | 
|  | 1547 | switch (ctrl.bRequest) { | 
|  | 1548 | case USB_REQ_CLEAR_FEATURE: | 
|  | 1549 | switch (ctrl.bRequestType) { | 
|  | 1550 | case USB_RECIP_ENDPOINT: | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1551 | tmp = le16_to_cpu(ctrl.wIndex) & 0x0f; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | /* active endpoint */ | 
|  | 1553 | if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0)) | 
|  | 1554 | goto stall; | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1555 | if (ctrl.wIndex & __constant_cpu_to_le16( | 
|  | 1556 | USB_DIR_IN)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | if (!dev->ep[tmp].is_in) | 
|  | 1558 | goto stall; | 
|  | 1559 | } else { | 
|  | 1560 | if (dev->ep[tmp].is_in) | 
|  | 1561 | goto stall; | 
|  | 1562 | } | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1563 | if (ctrl.wValue != __constant_cpu_to_le16( | 
|  | 1564 | USB_ENDPOINT_HALT)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1565 | goto stall; | 
|  | 1566 | if (tmp) | 
|  | 1567 | goku_clear_halt(&dev->ep[tmp]); | 
|  | 1568 | succeed: | 
|  | 1569 | /* start ep0out status stage */ | 
|  | 1570 | writel(~(1<<0), &regs->EOP); | 
|  | 1571 | dev->ep[0].stopped = 1; | 
|  | 1572 | dev->ep0state = EP0_STATUS; | 
|  | 1573 | return; | 
|  | 1574 | case USB_RECIP_DEVICE: | 
|  | 1575 | /* device remote wakeup: always clear */ | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1576 | if (ctrl.wValue != __constant_cpu_to_le16(1)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 | goto stall; | 
|  | 1578 | VDBG(dev, "clear dev remote wakeup\n"); | 
|  | 1579 | goto succeed; | 
|  | 1580 | case USB_RECIP_INTERFACE: | 
|  | 1581 | goto stall; | 
|  | 1582 | default:		/* pass to gadget driver */ | 
|  | 1583 | break; | 
|  | 1584 | } | 
|  | 1585 | break; | 
|  | 1586 | default: | 
|  | 1587 | break; | 
|  | 1588 | } | 
|  | 1589 | } | 
|  | 1590 |  | 
|  | 1591 | #ifdef USB_TRACE | 
|  | 1592 | VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", | 
|  | 1593 | ctrl.bRequestType, ctrl.bRequest, | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1594 | le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex), | 
|  | 1595 | le16_to_cpu(ctrl.wLength)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | #endif | 
|  | 1597 |  | 
|  | 1598 | /* hw wants to know when we're configured (or not) */ | 
|  | 1599 | dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION | 
|  | 1600 | && ctrl.bRequestType == USB_RECIP_DEVICE); | 
|  | 1601 | if (unlikely(dev->req_config)) | 
| David Brownell | 988199f | 2005-05-07 13:05:52 -0700 | [diff] [blame] | 1602 | dev->configured = (ctrl.wValue != __constant_cpu_to_le16(0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 |  | 
|  | 1604 | /* delegate everything to the gadget driver. | 
|  | 1605 | * it may respond after this irq handler returns. | 
|  | 1606 | */ | 
|  | 1607 | spin_unlock (&dev->lock); | 
|  | 1608 | tmp = dev->driver->setup(&dev->gadget, &ctrl); | 
|  | 1609 | spin_lock (&dev->lock); | 
|  | 1610 | if (unlikely(tmp < 0)) { | 
|  | 1611 | stall: | 
|  | 1612 | #ifdef USB_TRACE | 
|  | 1613 | VDBG(dev, "req %02x.%02x protocol STALL; err %d\n", | 
|  | 1614 | ctrl.bRequestType, ctrl.bRequest, tmp); | 
|  | 1615 | #endif | 
|  | 1616 | command(regs, COMMAND_STALL, 0); | 
|  | 1617 | dev->ep[0].stopped = 1; | 
|  | 1618 | dev->ep0state = EP0_STALL; | 
|  | 1619 | } | 
|  | 1620 |  | 
|  | 1621 | /* expect at least one data or status stage irq */ | 
|  | 1622 | } | 
|  | 1623 |  | 
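|  |  | /* ack one interrupt source: remove it from the cached status word, write | 
|  |  |  * its complemented bit mask to int_status so the controller drops it too, | 
|  |  |  * and note that this pass through the handler did some work. | 
|  |  |  */ | 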
|  | 1624 | #define ACK(irqbit) { \ | 
|  | 1625 | stat &= ~irqbit; \ | 
|  | 1626 | writel(~irqbit, &regs->int_status); \ | 
|  | 1627 | handled = 1; \ | 
|  | 1628 | } | 
|  | 1629 |  | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 1630 | static irqreturn_t goku_irq(int irq, void *_dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | { | 
|  | 1632 | struct goku_udc			*dev = _dev; | 
|  | 1633 | struct goku_udc_regs __iomem	*regs = dev->regs; | 
|  | 1634 | struct goku_ep			*ep; | 
|  | 1635 | u32				stat, handled = 0; | 
|  | 1636 | unsigned			i, rescans = 5; | 
|  | 1637 |  | 
|  | 1638 | spin_lock(&dev->lock); | 
|  | 1639 |  | 
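|  |  | /* collect and service interrupt causes; the status is re-read a bounded | 
|  |  |  * number of times so events raised while we work are still picked up, | 
|  |  |  * without risking an endless loop in this handler. | 
|  |  |  */ | 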
|  | 1640 | rescan: | 
|  | 1641 | stat = readl(&regs->int_status) & dev->int_enable; | 
|  | 1642 | if (!stat) | 
|  | 1643 | goto done; | 
|  | 1644 | dev->irqs++; | 
|  | 1645 |  | 
|  | 1646 | /* device-wide irqs */ | 
|  | 1647 | if (unlikely(stat & INT_DEVWIDE)) { | 
|  | 1648 | if (stat & INT_SYSERROR) { | 
|  | 1649 | ERROR(dev, "system error\n"); | 
|  | 1650 | stop_activity(dev, dev->driver); | 
|  | 1651 | stat = 0; | 
|  | 1652 | handled = 1; | 
|  | 1653 | // FIXME have a neater way to prevent re-enumeration | 
|  | 1654 | dev->driver = NULL; | 
|  | 1655 | goto done; | 
|  | 1656 | } | 
|  | 1657 | if (stat & INT_PWRDETECT) { | 
|  | 1658 | writel(~stat, &regs->int_status); | 
|  | 1659 | if (readl(&dev->regs->power_detect) & PW_DETECT) { | 
|  | 1660 | VDBG(dev, "connect\n"); | 
|  | 1661 | ep0_start(dev); | 
|  | 1662 | } else { | 
|  | 1663 | DBG(dev, "disconnect\n"); | 
|  | 1664 | if (dev->gadget.speed == USB_SPEED_FULL) | 
|  | 1665 | stop_activity(dev, dev->driver); | 
|  | 1666 | dev->ep0state = EP0_DISCONNECT; | 
|  | 1667 | dev->int_enable = INT_DEVWIDE; | 
|  | 1668 | writel(dev->int_enable, &dev->regs->int_enable); | 
|  | 1669 | } | 
|  | 1670 | stat = 0; | 
|  | 1671 | handled = 1; | 
|  | 1672 | goto done; | 
|  | 1673 | } | 
|  | 1674 | if (stat & INT_SUSPEND) { | 
|  | 1675 | ACK(INT_SUSPEND); | 
|  | 1676 | if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) { | 
|  | 1677 | switch (dev->ep0state) { | 
|  | 1678 | case EP0_DISCONNECT: | 
|  | 1679 | case EP0_SUSPEND: | 
|  | 1680 | goto pm_next; | 
|  | 1681 | default: | 
|  | 1682 | break; | 
|  | 1683 | } | 
|  | 1684 | DBG(dev, "USB suspend\n"); | 
|  | 1685 | dev->ep0state = EP0_SUSPEND; | 
|  | 1686 | if (dev->gadget.speed != USB_SPEED_UNKNOWN | 
|  | 1687 | && dev->driver | 
|  | 1688 | && dev->driver->suspend) { | 
|  | 1689 | spin_unlock(&dev->lock); | 
|  | 1690 | dev->driver->suspend(&dev->gadget); | 
|  | 1691 | spin_lock(&dev->lock); | 
|  | 1692 | } | 
|  | 1693 | } else { | 
|  | 1694 | if (dev->ep0state != EP0_SUSPEND) { | 
|  | 1695 | DBG(dev, "bogus USB resume %d\n", | 
|  | 1696 | dev->ep0state); | 
|  | 1697 | goto pm_next; | 
|  | 1698 | } | 
|  | 1699 | DBG(dev, "USB resume\n"); | 
|  | 1700 | dev->ep0state = EP0_IDLE; | 
|  | 1701 | if (dev->gadget.speed != USB_SPEED_UNKNOWN | 
|  | 1702 | && dev->driver | 
|  | 1703 | && dev->driver->resume) { | 
|  | 1704 | spin_unlock(&dev->lock); | 
|  | 1705 | dev->driver->resume(&dev->gadget); | 
|  | 1706 | spin_lock(&dev->lock); | 
|  | 1707 | } | 
|  | 1708 | } | 
|  | 1709 | } | 
|  | 1710 | pm_next: | 
|  | 1711 | if (stat & INT_USBRESET) {		/* hub reset done */ | 
|  | 1712 | ACK(INT_USBRESET); | 
|  | 1713 | INFO(dev, "USB reset done, gadget %s\n", | 
|  | 1714 | dev->driver->driver.name); | 
|  | 1715 | } | 
|  | 1716 | // and INT_ERR on some endpoint's crc/bitstuff/... problem | 
|  | 1717 | } | 
|  | 1718 |  | 
|  | 1719 | /* progress ep0 setup, data, or status stages. | 
|  | 1720 | * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs | 
|  | 1721 | */ | 
|  | 1722 | if (stat & INT_SETUP) { | 
|  | 1723 | ACK(INT_SETUP); | 
|  | 1724 | dev->ep[0].irqs++; | 
|  | 1725 | ep0_setup(dev); | 
|  | 1726 | } | 
|  | 1727 | if (stat & INT_STATUSNAK) { | 
|  | 1728 | ACK(INT_STATUSNAK|INT_ENDPOINT0); | 
|  | 1729 | if (dev->ep0state == EP0_IN) { | 
|  | 1730 | ep = &dev->ep[0]; | 
|  | 1731 | ep->irqs++; | 
|  | 1732 | nuke(ep, 0); | 
|  | 1733 | writel(~(1<<0), &regs->EOP); | 
|  | 1734 | dev->ep0state = EP0_STATUS; | 
|  | 1735 | } | 
|  | 1736 | } | 
|  | 1737 | if (stat & INT_ENDPOINT0) { | 
|  | 1738 | ACK(INT_ENDPOINT0); | 
|  | 1739 | ep = &dev->ep[0]; | 
|  | 1740 | ep->irqs++; | 
|  | 1741 | pio_advance(ep); | 
|  | 1742 | } | 
|  | 1743 |  | 
|  | 1744 | /* dma completion */ | 
|  | 1745 | if (stat & INT_MSTRDEND) {	/* IN */ | 
|  | 1746 | ACK(INT_MSTRDEND); | 
|  | 1747 | ep = &dev->ep[UDC_MSTRD_ENDPOINT]; | 
|  | 1748 | ep->irqs++; | 
|  | 1749 | dma_advance(dev, ep); | 
|  | 1750 | } | 
|  | 1751 | if (stat & INT_MSTWREND) {	/* OUT */ | 
|  | 1752 | ACK(INT_MSTWREND); | 
|  | 1753 | ep = &dev->ep[UDC_MSTWR_ENDPOINT]; | 
|  | 1754 | ep->irqs++; | 
|  | 1755 | dma_advance(dev, ep); | 
|  | 1756 | } | 
|  | 1757 | if (stat & INT_MSTWRTMOUT) {	/* OUT */ | 
|  | 1758 | ACK(INT_MSTWRTMOUT); | 
|  | 1759 | ep = &dev->ep[UDC_MSTWR_ENDPOINT]; | 
|  | 1760 | ep->irqs++; | 
|  | 1761 | ERROR(dev, "%s write timeout?\n", ep->ep.name); | 
|  | 1762 | // reset dma? then dma_advance() | 
|  | 1763 | } | 
|  | 1764 |  | 
|  | 1765 | /* pio */ | 
|  | 1766 | for (i = 1; i < 4; i++) { | 
|  | 1767 | u32		tmp = INT_EPxDATASET(i); | 
|  | 1768 |  | 
|  | 1769 | if (!(stat & tmp)) | 
|  | 1770 | continue; | 
|  | 1771 | ep = &dev->ep[i]; | 
|  | 1772 | pio_advance(ep); | 
|  | 1773 | if (list_empty (&ep->queue)) | 
|  | 1774 | pio_irq_disable(dev, regs, i); | 
|  | 1775 | stat &= ~tmp; | 
|  | 1776 | handled = 1; | 
|  | 1777 | ep->irqs++; | 
|  | 1778 | } | 
|  | 1779 |  | 
|  | 1780 | if (rescans--) | 
|  | 1781 | goto rescan; | 
|  | 1782 |  | 
|  | 1783 | done: | 
|  | 1784 | (void)readl(&regs->int_enable); | 
|  | 1785 | spin_unlock(&dev->lock); | 
|  | 1786 | if (stat) | 
|  | 1787 | DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat, | 
|  | 1788 | readl(&regs->int_status), dev->int_enable); | 
|  | 1789 | return IRQ_RETVAL(handled); | 
|  | 1790 | } | 
|  | 1791 |  | 
|  | 1792 | #undef ACK | 
|  | 1793 |  | 
|  | 1794 | /*-------------------------------------------------------------------------*/ | 
|  | 1795 |  | 
|  | 1796 | static void gadget_release(struct device *_dev) | 
|  | 1797 | { | 
|  | 1798 | struct goku_udc	*dev = dev_get_drvdata(_dev); | 
|  | 1799 |  | 
|  | 1800 | kfree(dev); | 
|  | 1801 | } | 
|  | 1802 |  | 
|  | 1803 | /* tear down the binding between this driver and the pci device */ | 
|  | 1804 |  | 
|  | 1805 | static void goku_remove(struct pci_dev *pdev) | 
|  | 1806 | { | 
|  | 1807 | struct goku_udc		*dev = pci_get_drvdata(pdev); | 
|  | 1808 |  | 
|  | 1809 | DBG(dev, "%s\n", __FUNCTION__); | 
| David Brownell | 6bea476 | 2006-12-05 03:15:33 -0800 | [diff] [blame] | 1810 |  | 
|  | 1811 | BUG_ON(dev->driver); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 |  | 
|  | 1813 | #ifdef CONFIG_USB_GADGET_DEBUG_FILES | 
|  | 1814 | remove_proc_entry(proc_node_name, NULL); | 
|  | 1815 | #endif | 
|  | 1816 | if (dev->regs) | 
|  | 1817 | udc_reset(dev); | 
|  | 1818 | if (dev->got_irq) | 
|  | 1819 | free_irq(pdev->irq, dev); | 
|  | 1820 | if (dev->regs) | 
|  | 1821 | iounmap(dev->regs); | 
|  | 1822 | if (dev->got_region) | 
|  | 1823 | release_mem_region(pci_resource_start (pdev, 0), | 
|  | 1824 | pci_resource_len (pdev, 0)); | 
|  | 1825 | if (dev->enabled) | 
|  | 1826 | pci_disable_device(pdev); | 
|  | 1827 | device_unregister(&dev->gadget.dev); | 
|  | 1828 |  | 
|  | 1829 | pci_set_drvdata(pdev, NULL); | 
|  | 1830 | dev->regs = NULL; | 
|  | 1831 | the_controller = NULL; | 
|  | 1832 |  | 
|  | 1833 | INFO(dev, "unbind\n"); | 
|  | 1834 | } | 
|  | 1835 |  | 
|  | 1836 | /* wrap this driver around the specified pci device, but | 
|  | 1837 | * don't respond over USB until a gadget driver binds to us. | 
|  | 1838 | */ | 
|  | 1839 |  | 
|  | 1840 | static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 
|  | 1841 | { | 
|  | 1842 | struct goku_udc		*dev = NULL; | 
|  | 1843 | unsigned long		resource, len; | 
|  | 1844 | void __iomem		*base = NULL; | 
|  | 1845 | int			retval; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 |  | 
|  | 1847 | /* if you want to support more than one controller in a system, | 
|  | 1848 | * usb_gadget_{register,unregister}_driver() must change. | 
|  | 1849 | */ | 
|  | 1850 | if (the_controller) { | 
|  |  | /* "dev" has not been allocated yet here, so log with plain printk() | 
|  |  |  * rather than the dev-based WARN() macro. | 
|  |  |  */ | 
|  | 1851 | printk(KERN_WARNING "%s: ignoring %s\n", driver_name, pci_name(pdev)); | 
|  | 1852 | return -EBUSY; | 
|  | 1853 | } | 
|  | 1854 | if (!pdev->irq) { | 
|  | 1855 | printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev)); | 
|  | 1856 | retval = -ENODEV; | 
|  | 1857 | goto done; | 
|  | 1858 | } | 
|  | 1859 |  | 
|  | 1860 | /* alloc, and start init */ | 
| Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 1861 | dev = kmalloc (sizeof *dev, GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 | if (dev == NULL) { | 
|  | 1863 | pr_debug("enomem %s\n", pci_name(pdev)); | 
|  | 1864 | retval = -ENOMEM; | 
|  | 1865 | goto done; | 
|  | 1866 | } | 
|  | 1867 |  | 
|  | 1868 | memset(dev, 0, sizeof *dev); | 
|  | 1869 | spin_lock_init(&dev->lock); | 
|  | 1870 | dev->pdev = pdev; | 
|  | 1871 | dev->gadget.ops = &goku_ops; | 
|  | 1872 |  | 
|  | 1873 | /* the "gadget" abstracts/virtualizes the controller */ | 
|  | 1874 | strcpy(dev->gadget.dev.bus_id, "gadget"); | 
|  | 1875 | dev->gadget.dev.parent = &pdev->dev; | 
|  | 1876 | dev->gadget.dev.dma_mask = pdev->dev.dma_mask; | 
|  | 1877 | dev->gadget.dev.release = gadget_release; | 
|  | 1878 | dev->gadget.name = driver_name; | 
|  | 1879 |  | 
|  | 1880 | /* now all the pci goodies ... */ | 
|  | 1881 | retval = pci_enable_device(pdev); | 
|  | 1882 | if (retval < 0) { | 
|  | 1883 | DBG(dev, "can't enable, %d\n", retval); | 
|  | 1884 | goto done; | 
|  | 1885 | } | 
|  | 1886 | dev->enabled = 1; | 
|  | 1887 |  | 
|  | 1888 | resource = pci_resource_start(pdev, 0); | 
|  | 1889 | len = pci_resource_len(pdev, 0); | 
|  | 1890 | if (!request_mem_region(resource, len, driver_name)) { | 
|  | 1891 | DBG(dev, "controller already in use\n"); | 
|  | 1892 | retval = -EBUSY; | 
|  | 1893 | goto done; | 
|  | 1894 | } | 
|  | 1895 | dev->got_region = 1; | 
|  | 1896 |  | 
|  | 1897 | base = ioremap_nocache(resource, len); | 
|  | 1898 | if (base == NULL) { | 
|  | 1899 | DBG(dev, "can't map memory\n"); | 
|  | 1900 | retval = -EFAULT; | 
|  | 1901 | goto done; | 
|  | 1902 | } | 
|  | 1903 | dev->regs = (struct goku_udc_regs __iomem *) base; | 
|  | 1904 |  | 
|  | 1905 | pci_set_drvdata(pdev, dev); | 
|  | 1906 | INFO(dev, "%s\n", driver_desc); | 
|  | 1907 | INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr()); | 
| David S. Miller | c6387a4 | 2006-06-20 01:21:29 -0700 | [diff] [blame] | 1908 | INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1909 |  | 
|  | 1910 | /* init to known state, then setup irqs */ | 
|  | 1911 | udc_reset(dev); | 
|  | 1912 | udc_reinit (dev); | 
| Thomas Gleixner | d54b5ca | 2006-07-01 19:29:44 -0700 | [diff] [blame] | 1913 | if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 | driver_name, dev) != 0) { | 
| David S. Miller | c6387a4 | 2006-06-20 01:21:29 -0700 | [diff] [blame] | 1915 | DBG(dev, "request interrupt %d failed\n", pdev->irq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1916 | retval = -EBUSY; | 
|  | 1917 | goto done; | 
|  | 1918 | } | 
|  | 1919 | dev->got_irq = 1; | 
|  | 1920 | if (use_dma) | 
|  | 1921 | pci_set_master(pdev); | 
|  | 1922 |  | 
|  | 1923 |  | 
|  | 1924 | #ifdef CONFIG_USB_GADGET_DEBUG_FILES | 
|  | 1925 | create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev); | 
|  | 1926 | #endif | 
|  | 1927 |  | 
|  | 1928 | /* done */ | 
|  | 1929 | the_controller = dev; | 
|  | 1930 | device_register(&dev->gadget.dev); | 
|  | 1931 |  | 
|  | 1932 | return 0; | 
|  | 1933 |  | 
|  | 1934 | done: | 
|  | 1935 | if (dev) | 
|  | 1936 | goku_remove (pdev); | 
|  | 1937 | return retval; | 
|  | 1938 | } | 
|  | 1939 |  | 
|  | 1940 |  | 
|  | 1941 | /*-------------------------------------------------------------------------*/ | 
|  | 1942 |  | 
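|  |  | /* the id table matches Toshiba's vendor/device ids and additionally | 
|  |  |  * requires the PCI class serial-bus/USB with programming interface 0xfe, | 
|  |  |  * i.e. a USB device-side (peripheral) controller, not a host controller. | 
|  |  |  */ | 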
|  | 1943 | static struct pci_device_id pci_ids [] = { { | 
|  | 1944 | .class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe), | 
|  | 1945 | .class_mask = 	~0, | 
|  | 1946 | .vendor =	0x102f,		/* Toshiba */ | 
|  | 1947 | .device =	0x0107,		/* this UDC */ | 
|  | 1948 | .subvendor =	PCI_ANY_ID, | 
|  | 1949 | .subdevice =	PCI_ANY_ID, | 
|  | 1950 |  | 
|  | 1951 | }, { /* end: all zeroes */ } | 
|  | 1952 | }; | 
|  | 1953 | MODULE_DEVICE_TABLE (pci, pci_ids); | 
|  | 1954 |  | 
|  | 1955 | static struct pci_driver goku_pci_driver = { | 
|  | 1956 | .name =		(char *) driver_name, | 
|  | 1957 | .id_table =	pci_ids, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1958 |  | 
|  | 1959 | .probe =	goku_probe, | 
|  | 1960 | .remove =	goku_remove, | 
|  | 1961 |  | 
|  | 1962 | /* FIXME add power management support */ | 
|  | 1963 | }; | 
|  | 1964 |  | 
|  | 1965 | static int __init init (void) | 
|  | 1966 | { | 
|  | 1967 | return pci_register_driver (&goku_pci_driver); | 
|  | 1968 | } | 
|  | 1969 | module_init (init); | 
|  | 1970 |  | 
|  | 1971 | static void __exit cleanup (void) | 
|  | 1972 | { | 
|  | 1973 | pci_unregister_driver (&goku_pci_driver); | 
|  | 1974 | } | 
|  | 1975 | module_exit (cleanup); |