1/*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 */
26
27#undef DEBUG
28// #define VERBOSE DBG_VERBOSE
29
30#include <linux/config.h>
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/ioport.h>
34#include <linux/types.h>
35#include <linux/errno.h>
36#include <linux/delay.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/init.h>
40#include <linux/timer.h>
41#include <linux/list.h>
42#include <linux/interrupt.h>
43#include <linux/proc_fs.h>
44#include <linux/mm.h>
45#include <linux/platform_device.h>
46#include <linux/dma-mapping.h>
47
48#include <asm/byteorder.h>
49#include <asm/dma.h>
50#include <asm/io.h>
51#include <asm/irq.h>
52#include <asm/system.h>
53#include <asm/mach-types.h>
54#include <asm/unaligned.h>
55#include <asm/hardware.h>
56#ifdef CONFIG_ARCH_PXA
57#include <asm/arch/pxa-regs.h>
58#endif
59
60#include <linux/usb_ch9.h>
61#include <linux/usb_gadget.h>
62
63#include <asm/arch/hardware/intel_udc.h>
64
65
66/*
67 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
68 * series processors. The UDC for the IXP 4xx series is very similar.
69 * There are fifteen endpoints, in addition to ep0.
70 *
71 * Such controller drivers work with a gadget driver. The gadget driver
72 * returns descriptors, implements configuration and data protocols used
73 * by the host to interact with this device, and allocates endpoints to
74 * the different protocol interfaces. The controller driver virtualizes
75 * usb hardware so that the gadget drivers will be more portable.
76 *
77 * This UDC hardware wants to implement a bit too much USB protocol, so
78 * it constrains the sorts of USB configuration change events that work.
79 * The errata for these chips are misleading; some "fixed" bugs from
80 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
81 */
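/* Illustrative sketch (not part of this driver): a gadget driver that binds
 * to this controller supplies the callbacks checked by
 * usb_gadget_register_driver() later in this file -- roughly:
 *
 *	static struct usb_gadget_driver example_driver = {
 *		.function	= "example",
 *		.speed		= USB_SPEED_FULL,
 *		.bind		= example_bind,
 *		.unbind		= example_unbind,
 *		.setup		= example_setup,
 *		.disconnect	= example_disconnect,
 *		.driver		= { .name = "example" },
 *	};
 *
 * The names above are hypothetical; only the required fields come from the
 * registration checks below.
 */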
82
83#define DRIVER_VERSION "4-May-2005"
84#define DRIVER_DESC "PXA 25x USB Device Controller driver"
85
86
87static const char driver_name [] = "pxa2xx_udc";
88
89static const char ep0name [] = "ep0";
90
91
92// #define USE_DMA
93// #define USE_OUT_DMA
94// #define DISABLE_TEST_MODE
95
96#ifdef CONFIG_ARCH_IXP4XX
97#undef USE_DMA
98
99/* cpu-specific register addresses are compiled in to this code */
100#ifdef CONFIG_ARCH_PXA
101#error "Can't configure both IXP and PXA"
102#endif
103
104#endif
105
106#include "pxa2xx_udc.h"
107
108
109#ifdef USE_DMA
110static int use_dma = 1;
111module_param(use_dma, bool, 0);
112MODULE_PARM_DESC (use_dma, "true to use dma");
113
114static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
115static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
116
117#ifdef USE_OUT_DMA
118#define DMASTR " (dma support)"
119#else
120#define DMASTR " (dma in)"
121#endif
122
123#else /* !USE_DMA */
124#define DMASTR " (pio only)"
125#undef USE_OUT_DMA
126#endif
127
128#ifdef CONFIG_USB_PXA2XX_SMALL
129#define SIZE_STR " (small)"
130#else
131#define SIZE_STR ""
132#endif
133
134#ifdef DISABLE_TEST_MODE
135/* (mode == 0) == no undocumented chip tweaks
136 * (mode & 1) == double buffer bulk IN
137 * (mode & 2) == double buffer bulk OUT
138 * ... so mode = 3 (or 7, 15, etc) does it for both
139 */
140static ushort fifo_mode = 0;
141module_param(fifo_mode, ushort, 0);
142MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
143#endif
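/* Usage note (illustrative): use_dma and fifo_mode above are module
 * parameters, so when the corresponding macros (USE_DMA, DISABLE_TEST_MODE)
 * are defined at build time they can be set at load time, e.g.
 * "modprobe pxa2xx_udc use_dma=1 fifo_mode=3". With the default build,
 * both macros are commented out and neither parameter exists.
 */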
144
145/* ---------------------------------------------------------------------------
146 * endpoint related parts of the api to the usb controller hardware,
147 * used by gadget driver; and the inner talker-to-hardware core.
148 * ---------------------------------------------------------------------------
149 */
150
151static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
152static void nuke (struct pxa2xx_ep *, int status);
153
154static void pio_irq_enable(int bEndpointAddress)
155{
156 bEndpointAddress &= 0xf;
157 if (bEndpointAddress < 8)
158 UICR0 &= ~(1 << bEndpointAddress);
159 else {
160 bEndpointAddress -= 8;
161 UICR1 &= ~(1 << bEndpointAddress);
162 }
163}
164
165static void pio_irq_disable(int bEndpointAddress)
166{
167 bEndpointAddress &= 0xf;
168 if (bEndpointAddress < 8)
169 UICR0 |= 1 << bEndpointAddress;
170 else {
171 bEndpointAddress -= 8;
172 UICR1 |= 1 << bEndpointAddress;
173 }
174}
175
176/* The UDCCR reg contains mask and interrupt status bits,
177 * so using '|=' isn't safe as it may ack an interrupt.
178 */
179#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
180
181static inline void udc_set_mask_UDCCR(int mask)
182{
183 UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
184}
185
186static inline void udc_clear_mask_UDCCR(int mask)
187{
188 UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
189}
190
191static inline void udc_ack_int_UDCCR(int mask)
192{
 193 /* udccr contains the bits we don't want to change */
194 __u32 udccr = UDCCR & UDCCR_MASK_BITS;
195
196 UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
197}
198
199/*
200 * endpoint enable/disable
201 *
202 * we need to verify the descriptors used to enable endpoints. since pxa2xx
203 * endpoint configurations are fixed, and are pretty much always enabled,
204 * there's not a lot to manage here.
205 *
206 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
207 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
208 * for a single interface (with only the default altsetting) and for gadget
209 * drivers that don't halt endpoints (not reset by set_interface). that also
210 * means that if you use ISO, you must violate the USB spec rule that all
211 * iso endpoints must be in non-default altsettings.
212 */
213static int pxa2xx_ep_enable (struct usb_ep *_ep,
214 const struct usb_endpoint_descriptor *desc)
215{
216 struct pxa2xx_ep *ep;
217 struct pxa2xx_udc *dev;
218
219 ep = container_of (_ep, struct pxa2xx_ep, ep);
220 if (!_ep || !desc || ep->desc || _ep->name == ep0name
221 || desc->bDescriptorType != USB_DT_ENDPOINT
222 || ep->bEndpointAddress != desc->bEndpointAddress
223 || ep->fifo_size < le16_to_cpu
224 (desc->wMaxPacketSize)) {
225 DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
226 return -EINVAL;
227 }
228
229 /* xfer types must match, except that interrupt ~= bulk */
230 if (ep->bmAttributes != desc->bmAttributes
231 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
232 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
233 DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
234 return -EINVAL;
235 }
236
237 /* hardware _could_ do smaller, but driver doesn't */
238 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
239 && le16_to_cpu (desc->wMaxPacketSize)
240 != BULK_FIFO_SIZE)
241 || !desc->wMaxPacketSize) {
242 DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
243 return -ERANGE;
244 }
245
246 dev = ep->dev;
247 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
248 DMSG("%s, bogus device state\n", __FUNCTION__);
249 return -ESHUTDOWN;
250 }
251
252 ep->desc = desc;
253 ep->dma = -1;
254 ep->stopped = 0;
255 ep->pio_irqs = ep->dma_irqs = 0;
256 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
257
258 /* flush fifo (mostly for OUT buffers) */
259 pxa2xx_ep_fifo_flush (_ep);
260
261 /* ... reset halt state too, if we could ... */
262
263#ifdef USE_DMA
264 /* for (some) bulk and ISO endpoints, try to get a DMA channel and
265 * bind it to the endpoint. otherwise use PIO.
266 */
267 switch (ep->bmAttributes) {
268 case USB_ENDPOINT_XFER_ISOC:
269 if (le16_to_cpu(desc->wMaxPacketSize) % 32)
270 break;
271 // fall through
272 case USB_ENDPOINT_XFER_BULK:
273 if (!use_dma || !ep->reg_drcmr)
274 break;
275 ep->dma = pxa_request_dma ((char *)_ep->name,
276 (le16_to_cpu (desc->wMaxPacketSize) > 64)
277 ? DMA_PRIO_MEDIUM /* some iso */
278 : DMA_PRIO_LOW,
279 dma_nodesc_handler, ep);
280 if (ep->dma >= 0) {
281 *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
282 DMSG("%s using dma%d\n", _ep->name, ep->dma);
283 }
284 }
285#endif
286
287 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
288 return 0;
289}
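/* Illustrative sketch (not part of this driver): because this hardware has
 * fixed endpoint configurations, a descriptor passed to usb_ep_enable() must
 * match the endpoint's fixed address and type, and bulk maxpacket must equal
 * BULK_FIFO_SIZE -- e.g. something like:
 *
 *	static struct usb_endpoint_descriptor example_bulk_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __constant_cpu_to_le16(BULK_FIFO_SIZE),
 *	};
 *
 * with bEndpointAddress filled in to match the chosen hardware endpoint.
 */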
290
291static int pxa2xx_ep_disable (struct usb_ep *_ep)
292{
293 struct pxa2xx_ep *ep;
294 unsigned long flags;
295
296 ep = container_of (_ep, struct pxa2xx_ep, ep);
297 if (!_ep || !ep->desc) {
298 DMSG("%s, %s not enabled\n", __FUNCTION__,
299 _ep ? ep->ep.name : NULL);
300 return -EINVAL;
301 }
302 local_irq_save(flags);
303
304 nuke (ep, -ESHUTDOWN);
305
306#ifdef USE_DMA
307 if (ep->dma >= 0) {
308 *ep->reg_drcmr = 0;
309 pxa_free_dma (ep->dma);
310 ep->dma = -1;
311 }
312#endif
313
314 /* flush fifo (mostly for IN buffers) */
315 pxa2xx_ep_fifo_flush (_ep);
316
317 ep->desc = NULL;
318 ep->stopped = 1;
319
320 local_irq_restore(flags);
321 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
322 return 0;
323}
324
325/*-------------------------------------------------------------------------*/
326
327/* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
328 * must still pass correctly initialized endpoints, since other controller
329 * drivers may care about how it's currently set up (dma issues etc).
330 */
331
332/*
333 * pxa2xx_ep_alloc_request - allocate a request data structure
334 */
335static struct usb_request *
336pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
337{
338 struct pxa2xx_request *req;
339
340 req = kzalloc(sizeof(*req), gfp_flags);
341 if (!req)
342 return NULL;
343
344 INIT_LIST_HEAD (&req->queue);
345 return &req->req;
346}
347
348
349/*
350 * pxa2xx_ep_free_request - deallocate a request data structure
351 */
352static void
353pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
354{
355 struct pxa2xx_request *req;
356
357 req = container_of (_req, struct pxa2xx_request, req);
358 WARN_ON (!list_empty (&req->queue));
359 kfree(req);
360}
361
362
363/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
364 * no device-affinity and the heap works perfectly well for i/o buffers.
365 * It wastes much less memory than dma_alloc_coherent() would, and even
366 * prevents cacheline (32 bytes wide) sharing problems.
367 */
368static void *
369pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
370 dma_addr_t *dma, gfp_t gfp_flags)
371{
372 char *retval;
373
374 retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
375 if (retval)
376#ifdef USE_DMA
377 *dma = virt_to_bus (retval);
378#else
379 *dma = (dma_addr_t)~0;
380#endif
381 return retval;
382}
383
384static void
385pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
386 unsigned bytes)
387{
388 kfree (buf);
389}
390
391/*-------------------------------------------------------------------------*/
392
393/*
394 * done - retire a request; caller blocked irqs
395 */
396static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
397{
398 unsigned stopped = ep->stopped;
399
400 list_del_init(&req->queue);
401
402 if (likely (req->req.status == -EINPROGRESS))
403 req->req.status = status;
404 else
405 status = req->req.status;
406
407 if (status && status != -ESHUTDOWN)
408 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
409 ep->ep.name, &req->req, status,
410 req->req.actual, req->req.length);
411
412 /* don't modify queue heads during completion callback */
413 ep->stopped = 1;
414 req->req.complete(&ep->ep, &req->req);
415 ep->stopped = stopped;
416}
417
418
419static inline void ep0_idle (struct pxa2xx_udc *dev)
420{
421 dev->ep0state = EP0_IDLE;
422}
423
424static int
425write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
426{
427 u8 *buf;
428 unsigned length, count;
429
430 buf = req->req.buf + req->req.actual;
431 prefetch(buf);
432
433 /* how big will this packet be? */
434 length = min(req->req.length - req->req.actual, max);
435 req->req.actual += length;
436
437 count = length;
438 while (likely(count--))
439 *uddr = *buf++;
440
441 return length;
442}
443
444/*
445 * write to an IN endpoint fifo, as many packets as possible.
446 * irqs will use this to write the rest later.
447 * caller guarantees at least one packet buffer is ready (or a zlp).
448 */
449static int
450write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
451{
452 unsigned max;
453
454 max = le16_to_cpu(ep->desc->wMaxPacketSize);
455 do {
456 unsigned count;
457 int is_last, is_short;
458
459 count = write_packet(ep->reg_uddr, req, max);
460
461 /* last packet is usually short (or a zlp) */
462 if (unlikely (count != max))
463 is_last = is_short = 1;
464 else {
465 if (likely(req->req.length != req->req.actual)
466 || req->req.zero)
467 is_last = 0;
468 else
469 is_last = 1;
470 /* interrupt/iso maxpacket may not fill the fifo */
471 is_short = unlikely (max < ep->fifo_size);
472 }
473
474 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
475 ep->ep.name, count,
476 is_last ? "/L" : "", is_short ? "/S" : "",
477 req->req.length - req->req.actual, req);
478
479 /* let loose that packet. maybe try writing another one,
480 * double buffering might work. TSP, TPC, and TFS
481 * bit values are the same for all normal IN endpoints.
482 */
483 *ep->reg_udccs = UDCCS_BI_TPC;
484 if (is_short)
485 *ep->reg_udccs = UDCCS_BI_TSP;
486
487 /* requests complete when all IN data is in the FIFO */
488 if (is_last) {
489 done (ep, req, 0);
490 if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
491 pio_irq_disable (ep->bEndpointAddress);
492#ifdef USE_DMA
493 /* unaligned data and zlps couldn't use dma */
494 if (unlikely(!list_empty(&ep->queue))) {
495 req = list_entry(ep->queue.next,
496 struct pxa2xx_request, queue);
497 kick_dma(ep,req);
498 return 0;
499 }
500#endif
501 }
502 return 1;
503 }
504
505 // TODO experiment: how robust can fifo mode tweaking be?
506 // double buffering is off in the default fifo mode, which
507 // prevents TFS from being set here.
508
509 } while (*ep->reg_udccs & UDCCS_BI_TFS);
510 return 0;
511}
512
513/* caller asserts req->pending (ep0 irq status nyet cleared); starts
514 * ep0 data stage. these chips want very simple state transitions.
515 */
516static inline
517void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
518{
519 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
520 USIR0 = USIR0_IR0;
521 dev->req_pending = 0;
522 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
523 __FUNCTION__, tag, UDCCS0, flags);
524}
525
526static int
527write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
528{
529 unsigned count;
530 int is_short;
531
532 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
533 ep->dev->stats.write.bytes += count;
534
535 /* last packet "must be" short (or a zlp) */
536 is_short = (count != EP0_FIFO_SIZE);
537
538 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
539 req->req.length - req->req.actual, req);
540
541 if (unlikely (is_short)) {
542 if (ep->dev->req_pending)
543 ep0start(ep->dev, UDCCS0_IPR, "short IN");
544 else
545 UDCCS0 = UDCCS0_IPR;
546
547 count = req->req.length;
548 done (ep, req, 0);
549 ep0_idle(ep->dev);
550#ifndef CONFIG_ARCH_IXP4XX
551#if 1
552 /* This seems to get rid of lost status irqs in some cases:
553 * host responds quickly, or next request involves config
554 * change automagic, or should have been hidden, or ...
555 *
556 * FIXME get rid of all udelays possible...
557 */
558 if (count >= EP0_FIFO_SIZE) {
559 count = 100;
560 do {
561 if ((UDCCS0 & UDCCS0_OPR) != 0) {
562 /* clear OPR, generate ack */
563 UDCCS0 = UDCCS0_OPR;
564 break;
565 }
566 count--;
567 udelay(1);
568 } while (count);
569 }
570#endif
571#endif
572 } else if (ep->dev->req_pending)
573 ep0start(ep->dev, 0, "IN");
574 return is_short;
575}
576
577
578/*
579 * read_fifo - unload packet(s) from the fifo we use for usb OUT
580 * transfers and put them into the request. caller should have made
581 * sure there's at least one packet ready.
582 *
583 * returns true if the request completed because of short packet or the
584 * request buffer having filled (and maybe overran till end-of-packet).
585 */
586static int
587read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
588{
589 for (;;) {
590 u32 udccs;
591 u8 *buf;
592 unsigned bufferspace, count, is_short;
593
594 /* make sure there's a packet in the FIFO.
595 * UDCCS_{BO,IO}_RPC are all the same bit value.
596 * UDCCS_{BO,IO}_RNE are all the same bit value.
597 */
598 udccs = *ep->reg_udccs;
599 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
600 break;
601 buf = req->req.buf + req->req.actual;
602 prefetchw(buf);
603 bufferspace = req->req.length - req->req.actual;
604
605 /* read all bytes from this packet */
606 if (likely (udccs & UDCCS_BO_RNE)) {
607 count = 1 + (0x0ff & *ep->reg_ubcr);
608 req->req.actual += min (count, bufferspace);
609 } else /* zlp */
610 count = 0;
611 is_short = (count < ep->ep.maxpacket);
612 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
613 ep->ep.name, udccs, count,
614 is_short ? "/S" : "",
615 req, req->req.actual, req->req.length);
616 while (likely (count-- != 0)) {
617 u8 byte = (u8) *ep->reg_uddr;
618
619 if (unlikely (bufferspace == 0)) {
620 /* this happens when the driver's buffer
621 * is smaller than what the host sent.
622 * discard the extra data.
623 */
624 if (req->req.status != -EOVERFLOW)
625 DMSG("%s overflow %d\n",
626 ep->ep.name, count);
627 req->req.status = -EOVERFLOW;
628 } else {
629 *buf++ = byte;
630 bufferspace--;
631 }
632 }
633 *ep->reg_udccs = UDCCS_BO_RPC;
634 /* RPC/RSP/RNE could now reflect the other packet buffer */
635
636 /* iso is one request per packet */
637 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
638 if (udccs & UDCCS_IO_ROF)
639 req->req.status = -EHOSTUNREACH;
640 /* more like "is_done" */
641 is_short = 1;
642 }
643
644 /* completion */
645 if (is_short || req->req.actual == req->req.length) {
646 done (ep, req, 0);
647 if (list_empty(&ep->queue))
648 pio_irq_disable (ep->bEndpointAddress);
649 return 1;
650 }
651
652 /* finished that packet. the next one may be waiting... */
653 }
654 return 0;
655}
656
657/*
658 * special ep0 version of the above. no UBCR0 or double buffering; status
659 * handshaking is magic. most device protocols don't need control-OUT.
660 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
661 * protocols do use them.
662 */
663static int
664read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
665{
666 u8 *buf, byte;
667 unsigned bufferspace;
668
669 buf = req->req.buf + req->req.actual;
670 bufferspace = req->req.length - req->req.actual;
671
672 while (UDCCS0 & UDCCS0_RNE) {
673 byte = (u8) UDDR0;
674
675 if (unlikely (bufferspace == 0)) {
676 /* this happens when the driver's buffer
677 * is smaller than what the host sent.
678 * discard the extra data.
679 */
680 if (req->req.status != -EOVERFLOW)
681 DMSG("%s overflow\n", ep->ep.name);
682 req->req.status = -EOVERFLOW;
683 } else {
684 *buf++ = byte;
685 req->req.actual++;
686 bufferspace--;
687 }
688 }
689
690 UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
691
692 /* completion */
693 if (req->req.actual >= req->req.length)
694 return 1;
695
696 /* finished that packet. the next one may be waiting... */
697 return 0;
698}
699
700#ifdef USE_DMA
701
702#define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
703
704static void
705start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
706{
707 u32 dcmd = req->req.length;
708 u32 buf = req->req.dma;
709 u32 fifo = io_v2p ((u32)ep->reg_uddr);
710
711 /* caller guarantees there's a packet or more remaining
712 * - IN may end with a short packet (TSP set separately),
713 * - OUT is always full length
714 */
715 buf += req->req.actual;
716 dcmd -= req->req.actual;
717 ep->dma_fixup = 0;
718
719 /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
720 DCSR(ep->dma) = DCSR_NODESC;
721 if (is_in) {
722 DSADR(ep->dma) = buf;
723 DTADR(ep->dma) = fifo;
724 if (dcmd > MAX_IN_DMA)
725 dcmd = MAX_IN_DMA;
726 else
727 ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
728 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
729 | DCMD_FLOWTRG | DCMD_INCSRCADDR;
730 } else {
731#ifdef USE_OUT_DMA
732 DSADR(ep->dma) = fifo;
733 DTADR(ep->dma) = buf;
734 if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
735 dcmd = ep->ep.maxpacket;
736 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
737 | DCMD_FLOWSRC | DCMD_INCTRGADDR;
738#endif
739 }
740 DCMD(ep->dma) = dcmd;
741 DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
742 | (unlikely(is_in)
743 ? DCSR_STOPIRQEN /* use dma_nodesc_handler() */
744 : 0); /* use handle_ep() */
745}
746
747static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
748{
749 int is_in = ep->bEndpointAddress & USB_DIR_IN;
750
751 if (is_in) {
752 /* unaligned tx buffers and zlps only work with PIO */
753 if ((req->req.dma & 0x0f) != 0
754 || unlikely((req->req.length - req->req.actual)
755 == 0)) {
756 pio_irq_enable(ep->bEndpointAddress);
757 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
758 (void) write_fifo(ep, req);
759 } else {
760 start_dma_nodesc(ep, req, USB_DIR_IN);
761 }
762 } else {
763 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
764 DMSG("%s short dma read...\n", ep->ep.name);
765 /* we're always set up for pio out */
766 read_fifo (ep, req);
767 } else {
768 *ep->reg_udccs = UDCCS_BO_DME
769 | (*ep->reg_udccs & UDCCS_BO_FST);
770 start_dma_nodesc(ep, req, USB_DIR_OUT);
771 }
772 }
773}
774
775static void cancel_dma(struct pxa2xx_ep *ep)
776{
777 struct pxa2xx_request *req;
778 u32 tmp;
779
780 if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
781 return;
782
783 DCSR(ep->dma) = 0;
784 while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
785 cpu_relax();
786
787 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
788 tmp = DCMD(ep->dma) & DCMD_LENGTH;
789 req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
790
791 /* the last tx packet may be incomplete, so flush the fifo.
792 * FIXME correct req.actual if we can
793 */
794 if (ep->bEndpointAddress & USB_DIR_IN)
795 *ep->reg_udccs = UDCCS_BI_FTF;
796}
797
798/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
799static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
800{
801 struct pxa2xx_ep *ep = _ep;
802 struct pxa2xx_request *req;
803 u32 tmp, completed;
804
805 local_irq_disable();
806
807 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
808
809 ep->dma_irqs++;
810 ep->dev->stats.irqs++;
811 HEX_DISPLAY(ep->dev->stats.irqs);
812
813 /* ack/clear */
814 tmp = DCSR(ep->dma);
815 DCSR(ep->dma) = tmp;
816 if ((tmp & DCSR_STOPSTATE) == 0
817 || (DDADR(ep->dma) & DDADR_STOP) != 0) {
818 DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
819 ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
820 goto done;
821 }
822 DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
823
824 /* update transfer status */
825 completed = tmp & DCSR_BUSERR;
826 if (ep->bEndpointAddress & USB_DIR_IN)
827 tmp = DSADR(ep->dma);
828 else
829 tmp = DTADR(ep->dma);
830 req->req.actual = tmp - req->req.dma;
831
832 /* FIXME seems we sometimes see partial transfers... */
833
834 if (unlikely(completed != 0))
835 req->req.status = -EIO;
836 else if (req->req.actual) {
837 /* these registers have zeroes in low bits; they miscount
838 * some (end-of-transfer) short packets: tx 14 as tx 12
839 */
840 if (ep->dma_fixup)
841 req->req.actual = min(req->req.actual + 3,
842 req->req.length);
843
844 tmp = (req->req.length - req->req.actual);
845 completed = (tmp == 0);
846 if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
847
848 /* maybe validate final short packet ... */
849 if ((req->req.actual % ep->ep.maxpacket) != 0)
850 *ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
851
852 /* ... or zlp, using pio fallback */
853 else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
854 && req->req.zero) {
855 DMSG("%s zlp terminate ...\n", ep->ep.name);
856 completed = 0;
857 }
858 }
859 }
860
861 if (likely(completed)) {
862 done(ep, req, 0);
863
864 /* maybe re-activate after completion */
865 if (ep->stopped || list_empty(&ep->queue))
866 goto done;
867 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
868 }
869 kick_dma(ep, req);
870done:
871 local_irq_enable();
872}
873
874#endif
875
876/*-------------------------------------------------------------------------*/
877
878static int
879pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
880{
881 struct pxa2xx_request *req;
882 struct pxa2xx_ep *ep;
883 struct pxa2xx_udc *dev;
884 unsigned long flags;
885
886 req = container_of(_req, struct pxa2xx_request, req);
887 if (unlikely (!_req || !_req->complete || !_req->buf
888 || !list_empty(&req->queue))) {
889 DMSG("%s, bad params\n", __FUNCTION__);
890 return -EINVAL;
891 }
892
893 ep = container_of(_ep, struct pxa2xx_ep, ep);
894 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
895 DMSG("%s, bad ep\n", __FUNCTION__);
896 return -EINVAL;
897 }
898
899 dev = ep->dev;
900 if (unlikely (!dev->driver
901 || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
902 DMSG("%s, bogus device state\n", __FUNCTION__);
903 return -ESHUTDOWN;
904 }
905
906 /* iso is always one packet per request, that's the only way
907 * we can report per-packet status. that also helps with dma.
908 */
909 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
910 && req->req.length > le16_to_cpu
911 (ep->desc->wMaxPacketSize)))
912 return -EMSGSIZE;
913
914#ifdef USE_DMA
915 // FIXME caller may already have done the dma mapping
916 if (ep->dma >= 0) {
917 _req->dma = dma_map_single(dev->dev,
918 _req->buf, _req->length,
919 ((ep->bEndpointAddress & USB_DIR_IN) != 0)
920 ? DMA_TO_DEVICE
921 : DMA_FROM_DEVICE);
922 }
923#endif
924
925 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
926 _ep->name, _req, _req->length, _req->buf);
927
928 local_irq_save(flags);
929
930 _req->status = -EINPROGRESS;
931 _req->actual = 0;
932
933 /* kickstart this i/o queue? */
934 if (list_empty(&ep->queue) && !ep->stopped) {
935 if (ep->desc == 0 /* ep0 */) {
936 unsigned length = _req->length;
937
938 switch (dev->ep0state) {
939 case EP0_IN_DATA_PHASE:
940 dev->stats.write.ops++;
941 if (write_ep0_fifo(ep, req))
942 req = NULL;
943 break;
944
945 case EP0_OUT_DATA_PHASE:
946 dev->stats.read.ops++;
947 /* messy ... */
948 if (dev->req_config) {
949 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
950 dev->has_cfr ? "" : " raced");
951 if (dev->has_cfr)
952 UDCCFR = UDCCFR_AREN|UDCCFR_ACM
953 |UDCCFR_MB1;
954 done(ep, req, 0);
955 dev->ep0state = EP0_END_XFER;
956 local_irq_restore (flags);
957 return 0;
958 }
959 if (dev->req_pending)
960 ep0start(dev, UDCCS0_IPR, "OUT");
961 if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
962 && read_ep0_fifo(ep, req))) {
963 ep0_idle(dev);
964 done(ep, req, 0);
965 req = NULL;
966 }
967 break;
968
969 default:
970 DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
971 local_irq_restore (flags);
972 return -EL2HLT;
973 }
974#ifdef USE_DMA
975 /* either start dma or prime pio pump */
976 } else if (ep->dma >= 0) {
977 kick_dma(ep, req);
978#endif
 979 /* can the FIFO satisfy the request immediately? */
980 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
981 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
982 && write_fifo(ep, req))
983 req = NULL;
984 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
985 && read_fifo(ep, req)) {
986 req = NULL;
987 }
988
989 if (likely (req && ep->desc) && ep->dma < 0)
990 pio_irq_enable(ep->bEndpointAddress);
991 }
992
993 /* pio or dma irq handler advances the queue. */
994 if (likely (req != 0))
995 list_add_tail(&req->queue, &ep->queue);
996 local_irq_restore(flags);
997
998 return 0;
999}
1000
1001
1002/*
1003 * nuke - dequeue ALL requests
1004 */
1005static void nuke(struct pxa2xx_ep *ep, int status)
1006{
1007 struct pxa2xx_request *req;
1008
1009 /* called with irqs blocked */
1010#ifdef USE_DMA
1011 if (ep->dma >= 0 && !ep->stopped)
1012 cancel_dma(ep);
1013#endif
1014 while (!list_empty(&ep->queue)) {
1015 req = list_entry(ep->queue.next,
1016 struct pxa2xx_request,
1017 queue);
1018 done(ep, req, status);
1019 }
1020 if (ep->desc)
1021 pio_irq_disable (ep->bEndpointAddress);
1022}
1023
1024
1025/* dequeue JUST ONE request */
1026static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1027{
1028 struct pxa2xx_ep *ep;
1029 struct pxa2xx_request *req;
1030 unsigned long flags;
1031
1032 ep = container_of(_ep, struct pxa2xx_ep, ep);
1033 if (!_ep || ep->ep.name == ep0name)
1034 return -EINVAL;
1035
1036 local_irq_save(flags);
1037
1038 /* make sure it's actually queued on this endpoint */
1039 list_for_each_entry (req, &ep->queue, queue) {
1040 if (&req->req == _req)
1041 break;
1042 }
1043 if (&req->req != _req) {
1044 local_irq_restore(flags);
1045 return -EINVAL;
1046 }
1047
1048#ifdef USE_DMA
1049 if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
1050 cancel_dma(ep);
1051 done(ep, req, -ECONNRESET);
1052 /* restart i/o */
1053 if (!list_empty(&ep->queue)) {
1054 req = list_entry(ep->queue.next,
1055 struct pxa2xx_request, queue);
1056 kick_dma(ep, req);
1057 }
1058 } else
1059#endif
1060 done(ep, req, -ECONNRESET);
1061
1062 local_irq_restore(flags);
1063 return 0;
1064}
1065
1066/*-------------------------------------------------------------------------*/
1067
1068static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
1069{
1070 struct pxa2xx_ep *ep;
1071 unsigned long flags;
1072
1073 ep = container_of(_ep, struct pxa2xx_ep, ep);
1074 if (unlikely (!_ep
1075 || (!ep->desc && ep->ep.name != ep0name))
1076 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
1077 DMSG("%s, bad ep\n", __FUNCTION__);
1078 return -EINVAL;
1079 }
1080 if (value == 0) {
1081 /* this path (reset toggle+halt) is needed to implement
1082 * SET_INTERFACE on normal hardware. but it can't be
1083 * done from software on the PXA UDC, and the hardware
1084 * forgets to do it as part of SET_INTERFACE automagic.
1085 */
1086 DMSG("only host can clear %s halt\n", _ep->name);
1087 return -EROFS;
1088 }
1089
1090 local_irq_save(flags);
1091
1092 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
1093 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
1094 || !list_empty(&ep->queue))) {
1095 local_irq_restore(flags);
1096 return -EAGAIN;
1097 }
1098
1099 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
1100 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
1101
1102 /* ep0 needs special care */
1103 if (!ep->desc) {
1104 start_watchdog(ep->dev);
1105 ep->dev->req_pending = 0;
1106 ep->dev->ep0state = EP0_STALL;
1107
1108 /* and bulk/intr endpoints like dropping stalls too */
1109 } else {
1110 unsigned i;
1111 for (i = 0; i < 1000; i += 20) {
1112 if (*ep->reg_udccs & UDCCS_BI_SST)
1113 break;
1114 udelay(20);
1115 }
1116 }
1117 local_irq_restore(flags);
1118
1119 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1120 return 0;
1121}
1122
1123static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
1124{
1125 struct pxa2xx_ep *ep;
1126
1127 ep = container_of(_ep, struct pxa2xx_ep, ep);
1128 if (!_ep) {
1129 DMSG("%s, bad ep\n", __FUNCTION__);
1130 return -ENODEV;
1131 }
1132 /* pxa can't report unclaimed bytes from IN fifos */
1133 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
1134 return -EOPNOTSUPP;
1135 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
1136 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
1137 return 0;
1138 else
1139 return (*ep->reg_ubcr & 0xfff) + 1;
1140}
1141
1142static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
1143{
1144 struct pxa2xx_ep *ep;
1145
1146 ep = container_of(_ep, struct pxa2xx_ep, ep);
1147 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1148 DMSG("%s, bad ep\n", __FUNCTION__);
1149 return;
1150 }
1151
1152 /* toggle and halt bits stay unchanged */
1153
1154 /* for OUT, just read and discard the FIFO contents. */
1155 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1156 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
1157 (void) *ep->reg_uddr;
1158 return;
1159 }
1160
1161 /* most IN status is the same, but ISO can't stall */
 1162 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
 1163 | ((ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
 1164 ? 0 : UDCCS_BI_SST);
1165}
1166
1167
1168static struct usb_ep_ops pxa2xx_ep_ops = {
1169 .enable = pxa2xx_ep_enable,
1170 .disable = pxa2xx_ep_disable,
1171
1172 .alloc_request = pxa2xx_ep_alloc_request,
1173 .free_request = pxa2xx_ep_free_request,
1174
1175 .alloc_buffer = pxa2xx_ep_alloc_buffer,
1176 .free_buffer = pxa2xx_ep_free_buffer,
1177
1178 .queue = pxa2xx_ep_queue,
1179 .dequeue = pxa2xx_ep_dequeue,
1180
1181 .set_halt = pxa2xx_ep_set_halt,
1182 .fifo_status = pxa2xx_ep_fifo_status,
1183 .fifo_flush = pxa2xx_ep_fifo_flush,
1184};
1185
1186
1187/* ---------------------------------------------------------------------------
1188 * device-scoped parts of the api to the usb controller hardware
1189 * ---------------------------------------------------------------------------
1190 */
1191
1192static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
1193{
1194 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
1195}
1196
1197static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
1198{
1199 /* host may not have enabled remote wakeup */
1200 if ((UDCCS0 & UDCCS0_DRWF) == 0)
1201 return -EHOSTUNREACH;
1202 udc_set_mask_UDCCR(UDCCR_RSM);
1203 return 0;
1204}
1205
1206static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
1207static void udc_enable (struct pxa2xx_udc *);
1208static void udc_disable(struct pxa2xx_udc *);
1209
1210/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1211 * in active use.
1212 */
1213static int pullup(struct pxa2xx_udc *udc, int is_active)
1214{
1215 is_active = is_active && udc->vbus && udc->pullup;
1216 DMSG("%s\n", is_active ? "active" : "inactive");
1217 if (is_active)
1218 udc_enable(udc);
1219 else {
1220 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1221 DMSG("disconnect %s\n", udc->driver
1222 ? udc->driver->driver.name
1223 : "(no driver)");
1224 stop_activity(udc, udc->driver);
1225 }
1226 udc_disable(udc);
1227 }
1228 return 0;
1229}
1230
1231/* VBUS reporting logically comes from a transceiver */
1232static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1233{
1234 struct pxa2xx_udc *udc;
1235
1236 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1237 udc->vbus = is_active = (is_active != 0);
1238 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1239 pullup(udc, is_active);
1240 return 0;
1241}
1242
1243/* drivers may have software control over D+ pullup */
1244static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
1245{
1246 struct pxa2xx_udc *udc;
1247
1248 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1249
1250 /* not all boards support pullup control */
1251 if (!udc->mach->udc_command)
1252 return -EOPNOTSUPP;
1253
1254 is_active = (is_active != 0);
1255 udc->pullup = is_active;
1256 pullup(udc, is_active);
1257 return 0;
1258}
1259
1260static const struct usb_gadget_ops pxa2xx_udc_ops = {
1261 .get_frame = pxa2xx_udc_get_frame,
1262 .wakeup = pxa2xx_udc_wakeup,
1263 .vbus_session = pxa2xx_udc_vbus_session,
1264 .pullup = pxa2xx_udc_pullup,
1265
1266 // .vbus_draw ... boards may consume current from VBUS, up to
1267 // 100-500mA based on config. the 500uA suspend ceiling means
1268 // that exclusively vbus-powered PXA designs violate USB specs.
1269};
1270
1271/*-------------------------------------------------------------------------*/
1272
1273#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1274
1275static const char proc_node_name [] = "driver/udc";
1276
1277static int
1278udc_proc_read(char *page, char **start, off_t off, int count,
1279 int *eof, void *_dev)
1280{
1281 char *buf = page;
1282 struct pxa2xx_udc *dev = _dev;
1283 char *next = buf;
1284 unsigned size = count;
1285 unsigned long flags;
1286 int i, t;
1287 u32 tmp;
1288
1289 if (off != 0)
1290 return 0;
1291
1292 local_irq_save(flags);
1293
1294 /* basic device status */
1295 t = scnprintf(next, size, DRIVER_DESC "\n"
1296 "%s version: %s\nGadget driver: %s\nHost %s\n\n",
1297 driver_name, DRIVER_VERSION SIZE_STR DMASTR,
1298 dev->driver ? dev->driver->driver.name : "(none)",
1299 is_vbus_present() ? "full speed" : "disconnected");
1300 size -= t;
1301 next += t;
1302
1303 /* registers for device and ep0 */
1304 t = scnprintf(next, size,
1305 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1306 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
1307 size -= t;
1308 next += t;
1309
1310 tmp = UDCCR;
1311 t = scnprintf(next, size,
1312 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1313 (tmp & UDCCR_REM) ? " rem" : "",
1314 (tmp & UDCCR_RSTIR) ? " rstir" : "",
1315 (tmp & UDCCR_SRM) ? " srm" : "",
1316 (tmp & UDCCR_SUSIR) ? " susir" : "",
1317 (tmp & UDCCR_RESIR) ? " resir" : "",
1318 (tmp & UDCCR_RSM) ? " rsm" : "",
1319 (tmp & UDCCR_UDA) ? " uda" : "",
1320 (tmp & UDCCR_UDE) ? " ude" : "");
1321 size -= t;
1322 next += t;
1323
1324 tmp = UDCCS0;
1325 t = scnprintf(next, size,
1326 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1327 (tmp & UDCCS0_SA) ? " sa" : "",
1328 (tmp & UDCCS0_RNE) ? " rne" : "",
1329 (tmp & UDCCS0_FST) ? " fst" : "",
1330 (tmp & UDCCS0_SST) ? " sst" : "",
 1331 (tmp & UDCCS0_DRWF) ? " drwf" : "",
1332 (tmp & UDCCS0_FTF) ? " ftf" : "",
1333 (tmp & UDCCS0_IPR) ? " ipr" : "",
1334 (tmp & UDCCS0_OPR) ? " opr" : "");
1335 size -= t;
1336 next += t;
1337
1338 if (dev->has_cfr) {
1339 tmp = UDCCFR;
1340 t = scnprintf(next, size,
1341 "udccfr %02X =%s%s\n", tmp,
1342 (tmp & UDCCFR_AREN) ? " aren" : "",
1343 (tmp & UDCCFR_ACM) ? " acm" : "");
1344 size -= t;
1345 next += t;
1346 }
1347
1348 if (!is_vbus_present() || !dev->driver)
1349 goto done;
1350
1351 t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
1352 dev->stats.write.bytes, dev->stats.write.ops,
1353 dev->stats.read.bytes, dev->stats.read.ops,
1354 dev->stats.irqs);
1355 size -= t;
1356 next += t;
1357
1358 /* dump endpoint queues */
1359 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1360 struct pxa2xx_ep *ep = &dev->ep [i];
1361 struct pxa2xx_request *req;
1362 int t;
1363
1364 if (i != 0) {
1365 const struct usb_endpoint_descriptor *d;
1366
1367 d = ep->desc;
1368 if (!d)
1369 continue;
1370 tmp = *dev->ep [i].reg_udccs;
1371 t = scnprintf(next, size,
1372 "%s max %d %s udccs %02x irqs %lu/%lu\n",
1373 ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
1374 (ep->dma >= 0) ? "dma" : "pio", tmp,
1375 ep->pio_irqs, ep->dma_irqs);
1376 /* TODO translate all five groups of udccs bits! */
1377
1378 } else /* ep0 should only have one transfer queued */
1379 t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
1380 ep->pio_irqs);
1381 if (t <= 0 || t > size)
1382 goto done;
1383 size -= t;
1384 next += t;
1385
1386 if (list_empty(&ep->queue)) {
1387 t = scnprintf(next, size, "\t(nothing queued)\n");
1388 if (t <= 0 || t > size)
1389 goto done;
1390 size -= t;
1391 next += t;
1392 continue;
1393 }
1394 list_for_each_entry(req, &ep->queue, queue) {
1395#ifdef USE_DMA
1396 if (ep->dma >= 0 && req->queue.prev == &ep->queue)
1397 t = scnprintf(next, size,
1398 "\treq %p len %d/%d "
1399 "buf %p (dma%d dcmd %08x)\n",
1400 &req->req, req->req.actual,
1401 req->req.length, req->req.buf,
1402 ep->dma, DCMD(ep->dma)
1403 // low 13 bits == bytes-to-go
1404 );
1405 else
1406#endif
1407 t = scnprintf(next, size,
1408 "\treq %p len %d/%d buf %p\n",
1409 &req->req, req->req.actual,
1410 req->req.length, req->req.buf);
1411 if (t <= 0 || t > size)
1412 goto done;
1413 size -= t;
1414 next += t;
1415 }
1416 }
1417
1418done:
1419 local_irq_restore(flags);
1420 *eof = 1;
1421 return count - size;
1422}
1423
1424#define create_proc_files() \
1425 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
1426#define remove_proc_files() \
1427 remove_proc_entry(proc_node_name, NULL)
1428
1429#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
1430
1431#define create_proc_files() do {} while (0)
1432#define remove_proc_files() do {} while (0)
1433
1434#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1435
1436/* "function" sysfs attribute */
1437static ssize_t
1438show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1439{
1440 struct pxa2xx_udc *dev = dev_get_drvdata (_dev);
1441
1442 if (!dev->driver
1443 || !dev->driver->function
1444 || strlen (dev->driver->function) > PAGE_SIZE)
1445 return 0;
1446 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1447}
1448static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1449
1450/*-------------------------------------------------------------------------*/
1451
1452/*
1453 * udc_disable - disable USB device controller
1454 */
1455static void udc_disable(struct pxa2xx_udc *dev)
1456{
1457 /* block all irqs */
1458 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
1459 UICR0 = UICR1 = 0xff;
1460 UFNRH = UFNRH_SIM;
1461
1462 /* if hardware supports it, disconnect from usb */
1463 pullup_off();
1464
1465 udc_clear_mask_UDCCR(UDCCR_UDE);
1466
1467#ifdef CONFIG_ARCH_PXA
1468 /* Disable clock for USB device */
1469 pxa_set_cken(CKEN11_USB, 0);
1470#endif
1471
1472 ep0_idle (dev);
1473 dev->gadget.speed = USB_SPEED_UNKNOWN;
1474 LED_CONNECTED_OFF;
1475}
1476
1477
1478/*
1479 * udc_reinit - initialize software state
1480 */
1481static void udc_reinit(struct pxa2xx_udc *dev)
1482{
1483 u32 i;
1484
1485 /* device/ep0 records init */
1486 INIT_LIST_HEAD (&dev->gadget.ep_list);
1487 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1488 dev->ep0state = EP0_IDLE;
1489
1490 /* basic endpoint records init */
1491 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1492 struct pxa2xx_ep *ep = &dev->ep[i];
1493
1494 if (i != 0)
1495 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1496
1497 ep->desc = NULL;
1498 ep->stopped = 0;
1499 INIT_LIST_HEAD (&ep->queue);
1500 ep->pio_irqs = ep->dma_irqs = 0;
1501 }
1502
1503 /* the rest was statically initialized, and is read-only */
1504}
1505
1506/* until it's enabled, this UDC should be completely invisible
1507 * to any USB host.
1508 */
1509static void udc_enable (struct pxa2xx_udc *dev)
1510{
1511 udc_clear_mask_UDCCR(UDCCR_UDE);
1512
1513#ifdef CONFIG_ARCH_PXA
1514 /* Enable clock for USB device */
1515 pxa_set_cken(CKEN11_USB, 1);
1516 udelay(5);
1517#endif
1518
1519 /* try to clear these bits before we enable the udc */
1520 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1521
1522 ep0_idle(dev);
1523 dev->gadget.speed = USB_SPEED_UNKNOWN;
1524 dev->stats.irqs = 0;
1525
1526 /*
1527 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
1528 * - enable UDC
1529 * - if RESET is already in progress, ack interrupt
1530 * - unmask reset interrupt
1531 */
1532 udc_set_mask_UDCCR(UDCCR_UDE);
1533 if (!(UDCCR & UDCCR_UDA))
1534 udc_ack_int_UDCCR(UDCCR_RSTIR);
1535
1536 if (dev->has_cfr /* UDC_RES2 is defined */) {
1537 /* pxa255 (a0+) can avoid a set_config race that could
1538 * prevent gadget drivers from configuring correctly
1539 */
1540 UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
1541 } else {
1542 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1543 * which could result in missing packets and interrupts.
1544 * supposedly one bit per endpoint, controlling whether it
1545 * double buffers or not; ACM/AREN bits fit into the holes.
1546 * zero bits (like USIR0_IRx) disable double buffering.
1547 */
1548 UDC_RES1 = 0x00;
1549 UDC_RES2 = 0x00;
1550 }
1551
1552#ifdef DISABLE_TEST_MODE
1553 /* "test mode" seems to have become the default in later chip
1554 * revs, preventing double buffering (and invalidating docs).
1555 * this EXPERIMENT enables it for bulk endpoints by tweaking
1556 * undefined/reserved register bits (that other drivers clear).
1557 * Belcarra code comments noted this usage.
1558 */
1559 if (fifo_mode & 1) { /* IN endpoints */
1560 UDC_RES1 |= USIR0_IR1|USIR0_IR6;
1561 UDC_RES2 |= USIR1_IR11;
1562 }
1563 if (fifo_mode & 2) { /* OUT endpoints */
1564 UDC_RES1 |= USIR0_IR2|USIR0_IR7;
1565 UDC_RES2 |= USIR1_IR12;
1566 }
1567#endif
1568
1569 /* enable suspend/resume and reset irqs */
1570 udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
1571
1572 /* enable ep0 irqs */
1573 UICR0 &= ~UICR0_IM0;
1574
1575 /* if hardware supports it, pullup D+ and wait for reset */
1576 pullup_on();
1577}
1578
1579
1580/* when a driver is successfully registered, it will receive
1581 * control requests including set_configuration(), which enables
1582 * non-control requests. then usb traffic follows until a
1583 * disconnect is reported. then a host may connect again, or
1584 * the driver might get unbound.
1585 */
1586int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1587{
1588 struct pxa2xx_udc *dev = the_controller;
1589 int retval;
1590
1591 if (!driver
1592 || driver->speed < USB_SPEED_FULL
1593 || !driver->bind
1594 || !driver->unbind
1595 || !driver->disconnect
1596 || !driver->setup)
1597 return -EINVAL;
1598 if (!dev)
1599 return -ENODEV;
1600 if (dev->driver)
1601 return -EBUSY;
1602
1603 /* first hook up the driver ... */
1604 dev->driver = driver;
1605 dev->gadget.dev.driver = &driver->driver;
1606 dev->pullup = 1;
1607
1608 device_add (&dev->gadget.dev);
1609 retval = driver->bind(&dev->gadget);
1610 if (retval) {
1611 DMSG("bind to driver %s --> error %d\n",
1612 driver->driver.name, retval);
1613 device_del (&dev->gadget.dev);
1614
1615 dev->driver = NULL;
1616 dev->gadget.dev.driver = NULL;
1617 return retval;
1618 }
1619 device_create_file(dev->dev, &dev_attr_function);
1620
1621 /* ... then enable host detection and ep0; and we're ready
1622 * for set_configuration as well as eventual disconnect.
1623 */
1624 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1625 pullup(dev, 1);
1626 dump_state(dev);
1627 return 0;
1628}
1629EXPORT_SYMBOL(usb_gadget_register_driver);
1630
1631static void
1632stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1633{
1634 int i;
1635
1636 /* don't disconnect drivers more than once */
1637 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1638 driver = NULL;
1639 dev->gadget.speed = USB_SPEED_UNKNOWN;
1640
1641 /* prevent new request submissions, kill any outstanding requests */
1642 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1643 struct pxa2xx_ep *ep = &dev->ep[i];
1644
1645 ep->stopped = 1;
1646 nuke(ep, -ESHUTDOWN);
1647 }
1648 del_timer_sync(&dev->timer);
1649
1650 /* report disconnect; the driver is already quiesced */
1651 LED_CONNECTED_OFF;
1652 if (driver)
1653 driver->disconnect(&dev->gadget);
1654
1655 /* re-init driver-visible data structures */
1656 udc_reinit(dev);
1657}
1658
1659int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1660{
1661 struct pxa2xx_udc *dev = the_controller;
1662
1663 if (!dev)
1664 return -ENODEV;
1665 if (!driver || driver != dev->driver)
1666 return -EINVAL;
1667
1668 local_irq_disable();
1669 pullup(dev, 0);
1670 stop_activity(dev, driver);
1671 local_irq_enable();
1672
1673 driver->unbind(&dev->gadget);
1674 dev->driver = NULL;
1675
1676 device_del (&dev->gadget.dev);
1677 device_remove_file(dev->dev, &dev_attr_function);
1678
1679 DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
1680 dump_state(dev);
1681 return 0;
1682}
1683EXPORT_SYMBOL(usb_gadget_unregister_driver);
1684
1685
1686/*-------------------------------------------------------------------------*/
1687
1688#ifdef CONFIG_ARCH_LUBBOCK
1689
1690/* Lubbock has separate connect and disconnect irqs. More typical designs
1691 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1692 */
1693
1694static irqreturn_t
1695lubbock_vbus_irq(int irq, void *_dev, struct pt_regs *r)
1696{
1697 struct pxa2xx_udc *dev = _dev;
1698 int vbus;
1699
1700 dev->stats.irqs++;
1701 HEX_DISPLAY(dev->stats.irqs);
1702 switch (irq) {
1703 case LUBBOCK_USB_IRQ:
1704 LED_CONNECTED_ON;
1705 vbus = 1;
1706 disable_irq(LUBBOCK_USB_IRQ);
1707 enable_irq(LUBBOCK_USB_DISC_IRQ);
1708 break;
1709 case LUBBOCK_USB_DISC_IRQ:
1710 LED_CONNECTED_OFF;
1711 vbus = 0;
1712 disable_irq(LUBBOCK_USB_DISC_IRQ);
1713 enable_irq(LUBBOCK_USB_IRQ);
1714 break;
1715 default:
1716 return IRQ_NONE;
1717 }
1718
1719 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1720 return IRQ_HANDLED;
1721}
1722
1723#endif
1724
1725
1726/*-------------------------------------------------------------------------*/
1727
1728static inline void clear_ep_state (struct pxa2xx_udc *dev)
1729{
1730 unsigned i;
1731
1732 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1733 * fifos, and pending transactions mustn't be continued in any case.
1734 */
1735 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1736 nuke(&dev->ep[i], -ECONNABORTED);
1737}
1738
1739static void udc_watchdog(unsigned long _dev)
1740{
1741 struct pxa2xx_udc *dev = (void *)_dev;
1742
1743 local_irq_disable();
1744 if (dev->ep0state == EP0_STALL
1745 && (UDCCS0 & UDCCS0_FST) == 0
1746 && (UDCCS0 & UDCCS0_SST) == 0) {
1747 UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
1748 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1749 start_watchdog(dev);
1750 }
1751 local_irq_enable();
1752}
1753
1754static void handle_ep0 (struct pxa2xx_udc *dev)
1755{
1756 u32 udccs0 = UDCCS0;
1757 struct pxa2xx_ep *ep = &dev->ep [0];
1758 struct pxa2xx_request *req;
1759 union {
1760 struct usb_ctrlrequest r;
1761 u8 raw [8];
1762 u32 word [2];
1763 } u;
1764
1765 if (list_empty(&ep->queue))
1766 req = NULL;
1767 else
1768 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
1769
1770 /* clear stall status */
1771 if (udccs0 & UDCCS0_SST) {
1772 nuke(ep, -EPIPE);
1773 UDCCS0 = UDCCS0_SST;
1774 del_timer(&dev->timer);
1775 ep0_idle(dev);
1776 }
1777
1778 /* previous request unfinished? non-error iff back-to-back ... */
1779 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
1780 nuke(ep, 0);
1781 del_timer(&dev->timer);
1782 ep0_idle(dev);
1783 }
1784
1785 switch (dev->ep0state) {
1786 case EP0_IDLE:
1787 /* late-breaking status? */
1788 udccs0 = UDCCS0;
1789
1790 /* start control request? */
1791 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
1792 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
1793 int i;
1794
1795 nuke (ep, -EPROTO);
1796
1797 /* read SETUP packet */
1798 for (i = 0; i < 8; i++) {
1799 if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
1800bad_setup:
1801 DMSG("SETUP %d!\n", i);
1802 goto stall;
1803 }
1804 u.raw [i] = (u8) UDDR0;
1805 }
1806 if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
1807 goto bad_setup;
1808
1809got_setup:
1810 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1811 u.r.bRequestType, u.r.bRequest,
1812 le16_to_cpu(u.r.wValue),
1813 le16_to_cpu(u.r.wIndex),
1814 le16_to_cpu(u.r.wLength));
1815
1816 /* cope with automagic for some standard requests. */
1817 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
1818 == USB_TYPE_STANDARD;
1819 dev->req_config = 0;
1820 dev->req_pending = 1;
1821 switch (u.r.bRequest) {
1822 /* hardware restricts gadget drivers here! */
1823 case USB_REQ_SET_CONFIGURATION:
1824 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1825 /* reflect hardware's automagic
1826 * up to the gadget driver.
1827 */
1828config_change:
1829 dev->req_config = 1;
1830 clear_ep_state(dev);
1831 /* if !has_cfr, there's no synch
1832 * else use AREN (later) not SA|OPR
1833 * USIR0_IR0 acts edge sensitive
1834 */
1835 }
1836 break;
1837 /* ... and here, even more ... */
1838 case USB_REQ_SET_INTERFACE:
1839 if (u.r.bRequestType == USB_RECIP_INTERFACE) {
1840 /* udc hardware is broken by design:
1841 * - altsetting may only be zero;
1842 * - hw resets all interfaces' eps;
1843 * - ep reset doesn't include halt(?).
1844 */
1845 DMSG("broken set_interface (%d/%d)\n",
1846 le16_to_cpu(u.r.wIndex),
1847 le16_to_cpu(u.r.wValue));
1848 goto config_change;
1849 }
1850 break;
1851 /* hardware was supposed to hide this */
1852 case USB_REQ_SET_ADDRESS:
1853 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1854 ep0start(dev, 0, "address");
1855 return;
1856 }
1857 break;
1858 }
1859
1860 if (u.r.bRequestType & USB_DIR_IN)
1861 dev->ep0state = EP0_IN_DATA_PHASE;
1862 else
1863 dev->ep0state = EP0_OUT_DATA_PHASE;
1864
1865 i = dev->driver->setup(&dev->gadget, &u.r);
1866 if (i < 0) {
1867 /* hardware automagic preventing STALL... */
1868 if (dev->req_config) {
 1869 /* hardware sometimes neglects to tell
 1870 * us about config change events,
1871 * so later ones may fail...
1872 */
1873 WARN("config change %02x fail %d?\n",
1874 u.r.bRequest, i);
1875 return;
1876 /* TODO experiment: if has_cfr,
1877 * hardware didn't ACK; maybe we
1878 * could actually STALL!
1879 */
1880 }
1881 DBG(DBG_VERBOSE, "protocol STALL, "
1882 "%02x err %d\n", UDCCS0, i);
1883stall:
1884 /* the watchdog timer helps deal with cases
1885 * where udc seems to clear FST wrongly, and
1886 * then NAKs instead of STALLing.
1887 */
1888 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1889 start_watchdog(dev);
1890 dev->ep0state = EP0_STALL;
1891
1892 /* deferred i/o == no response yet */
1893 } else if (dev->req_pending) {
1894 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1895 || dev->req_std || u.r.wLength))
1896 ep0start(dev, 0, "defer");
1897 else
1898 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1899 }
1900
1901 /* expect at least one data or status stage irq */
1902 return;
1903
1904 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1905 == (UDCCS0_OPR|UDCCS0_SA))) {
1906 unsigned i;
1907
1908 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1909 * still observed on a pxa255 a0.
1910 */
1911 DBG(DBG_VERBOSE, "e131\n");
1912 nuke(ep, -EPROTO);
1913
1914 /* read SETUP data, but don't trust it too much */
1915 for (i = 0; i < 8; i++)
1916 u.raw [i] = (u8) UDDR0;
1917 if ((u.r.bRequestType & USB_RECIP_MASK)
1918 > USB_RECIP_OTHER)
1919 goto stall;
1920 if (u.word [0] == 0 && u.word [1] == 0)
1921 goto stall;
1922 goto got_setup;
1923 } else {
1924 /* some random early IRQ:
1925 * - we acked FST
1926 * - IPR cleared
1927 * - OPR got set, without SA (likely status stage)
1928 */
1929 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1930 }
1931 break;
1932 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1933 if (udccs0 & UDCCS0_OPR) {
1934 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1935 DBG(DBG_VERBOSE, "ep0in premature status\n");
1936 if (req)
1937 done(ep, req, 0);
1938 ep0_idle(dev);
1939 } else /* irq was IPR clearing */ {
1940 if (req) {
1941 /* this IN packet might finish the request */
1942 (void) write_ep0_fifo(ep, req);
1943 } /* else IN token before response was written */
1944 }
1945 break;
1946 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1947 if (udccs0 & UDCCS0_OPR) {
1948 if (req) {
1949 /* this OUT packet might finish the request */
1950 if (read_ep0_fifo(ep, req))
1951 done(ep, req, 0);
1952 /* else more OUT packets expected */
1953 } /* else OUT token before read was issued */
1954 } else /* irq was IPR clearing */ {
1955 DBG(DBG_VERBOSE, "ep0out premature status\n");
1956 if (req)
1957 done(ep, req, 0);
1958 ep0_idle(dev);
1959 }
1960 break;
1961 case EP0_END_XFER:
1962 if (req)
1963 done(ep, req, 0);
1964 /* ack control-IN status (maybe in-zlp was skipped)
1965 * also appears after some config change events.
1966 */
1967 if (udccs0 & UDCCS0_OPR)
1968 UDCCS0 = UDCCS0_OPR;
1969 ep0_idle(dev);
1970 break;
1971 case EP0_STALL:
1972 UDCCS0 = UDCCS0_FST;
1973 break;
1974 }
1975 USIR0 = USIR0_IR0;
1976}
1977
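/* handle_ep() services a data-endpoint interrupt: it first acknowledges
 * the endpoint's error/status bits in UDCCS, then moves packets between
 * the head of the request queue and the FIFO.  The loop repeats while a
 * pass completes a request, so back-to-back requests can be serviced
 * from a single interrupt.
 */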
1978static void handle_ep(struct pxa2xx_ep *ep)
1979{
1980 struct pxa2xx_request *req;
1981 int is_in = ep->bEndpointAddress & USB_DIR_IN;
1982 int completed;
1983 u32 udccs, tmp;
1984
1985 do {
1986 completed = 0;
1987 if (likely (!list_empty(&ep->queue)))
1988 req = list_entry(ep->queue.next,
1989 struct pxa2xx_request, queue);
1990 else
1991 req = NULL;
1992
1993 // TODO check FST handling
1994
1995 udccs = *ep->reg_udccs;
1996 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
1997 tmp = UDCCS_BI_TUR;
1998 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
1999 tmp |= UDCCS_BI_SST;
2000 tmp &= udccs;
2001 if (likely (tmp))
2002 *ep->reg_udccs = tmp;
2003 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
2004 completed = write_fifo(ep, req);
2005
2006 } else { /* irq from RPC (or for ISO, ROF) */
2007 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2008 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2009 else
2010 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2011 tmp &= udccs;
2012 if (likely(tmp))
2013 *ep->reg_udccs = tmp;
2014
2015 /* fifos can hold packets, ready for reading... */
2016 if (likely(req)) {
2017#ifdef USE_OUT_DMA
2018// TODO didn't yet debug out-dma. this approach assumes
2019// the worst about short packets and RPC; a better approach may be possible.
2020
2021 if (likely(ep->dma >= 0)) {
2022 if (!(udccs & UDCCS_BO_RSP)) {
2023 *ep->reg_udccs = UDCCS_BO_RPC;
2024 ep->dma_irqs++;
2025 return;
2026 }
2027 }
2028#endif
2029 completed = read_fifo(ep, req);
2030 } else
2031 pio_irq_disable (ep->bEndpointAddress);
2032 }
2033 ep->pio_irqs++;
2034 } while (completed);
2035}
2036
2037/*
2038 * pxa2xx_udc_irq - interrupt handler
2039 *
2040 * avoid delays in ep0 processing. the control handshaking isn't always
2041 * under software control (pxa250c0 and the pxa255 are better), and delays
2042 * could cause usb protocol errors.
2043 */
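/* The handler loops until no interrupt source remains.  UDCCR bits cover
 * bus-level events (suspend, resume, reset); USIR0/USIR1 carry one bit
 * per endpoint for FIFO service requests, with ep0 dispatched first.
 */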
2044static irqreturn_t
2045pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
2046{
2047 struct pxa2xx_udc *dev = _dev;
2048 int handled;
2049
2050 dev->stats.irqs++;
2051 HEX_DISPLAY(dev->stats.irqs);
2052 do {
2053 u32 udccr = UDCCR;
2054
2055 handled = 0;
2056
2057 /* SUSpend Interrupt Request */
2058 if (unlikely(udccr & UDCCR_SUSIR)) {
2059 udc_ack_int_UDCCR(UDCCR_SUSIR);
2060 handled = 1;
2061 DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
2062 ? "" : "+disconnect");
2063
2064 if (!is_vbus_present())
2065 stop_activity(dev, dev->driver);
2066 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2067 && dev->driver
2068 && dev->driver->suspend)
2069 dev->driver->suspend(&dev->gadget);
2070 ep0_idle (dev);
2071 }
2072
2073 /* RESume Interrupt Request */
2074 if (unlikely(udccr & UDCCR_RESIR)) {
2075 udc_ack_int_UDCCR(UDCCR_RESIR);
2076 handled = 1;
2077 DBG(DBG_VERBOSE, "USB resume\n");
2078
2079 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2080 && dev->driver
2081 && dev->driver->resume
2082 && is_vbus_present())
2083 dev->driver->resume(&dev->gadget);
2084 }
2085
2086 /* ReSeT Interrupt Request - USB reset */
2087 if (unlikely(udccr & UDCCR_RSTIR)) {
2088 udc_ack_int_UDCCR(UDCCR_RSTIR);
2089 handled = 1;
2090
2091 if ((UDCCR & UDCCR_UDA) == 0) {
2092 DBG(DBG_VERBOSE, "USB reset start\n");
2093
2094 /* reset driver and endpoints,
2095 * in case that's not yet done
2096 */
2097 stop_activity (dev, dev->driver);
2098
2099 } else {
2100 DBG(DBG_VERBOSE, "USB reset end\n");
2101 dev->gadget.speed = USB_SPEED_FULL;
2102 LED_CONNECTED_ON;
2103 memset(&dev->stats, 0, sizeof dev->stats);
2104 /* driver and endpoints are still reset */
2105 }
2106
2107 } else {
2108 u32 usir0 = USIR0 & ~UICR0;
2109 u32 usir1 = USIR1 & ~UICR1;
2110 int i;
2111
2112 if (unlikely (!usir0 && !usir1))
2113 continue;
2114
2115 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2116
2117 /* control traffic */
2118 if (usir0 & USIR0_IR0) {
2119 dev->ep[0].pio_irqs++;
2120 handle_ep0(dev);
2121 handled = 1;
2122 }
2123
2124 /* endpoint data transfers */
2125 for (i = 0; i < 8; i++) {
2126 u32 tmp = 1 << i;
2127
2128 if (i && (usir0 & tmp)) {
2129 handle_ep(&dev->ep[i]);
2130 USIR0 |= tmp;
2131 handled = 1;
2132 }
2133 if (usir1 & tmp) {
2134 handle_ep(&dev->ep[i+8]);
2135 USIR1 |= tmp;
2136 handled = 1;
2137 }
2138 }
2139 }
2140
2141 /* we could also ask for 1 msec SOF (SIR) interrupts */
2142
2143 } while (handled);
2144 return IRQ_HANDLED;
2145}
2146
2147/*-------------------------------------------------------------------------*/
2148
2149static void nop_release (struct device *dev)
2150{
2151 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2152}
2153
2154/* this uses load-time allocation and initialization (instead of
2155 * doing it at run-time) to save code, eliminate fault paths, and
2156 * be more obviously correct.
2157 */
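/* Endpoint names encode each endpoint's fixed hardware role ("ep1in-bulk"
 * and so on), which is what gadget drivers match against.  The drcmr()
 * entries supply DMA request-channel hookups for the bulk and iso
 * endpoints; they only matter when DMA support is compiled in.
 */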
2158static struct pxa2xx_udc memory = {
2159 .gadget = {
2160 .ops = &pxa2xx_udc_ops,
2161 .ep0 = &memory.ep[0].ep,
2162 .name = driver_name,
2163 .dev = {
2164 .bus_id = "gadget",
2165 .release = nop_release,
2166 },
2167 },
2168
2169 /* control endpoint */
2170 .ep[0] = {
2171 .ep = {
2172 .name = ep0name,
2173 .ops = &pxa2xx_ep_ops,
2174 .maxpacket = EP0_FIFO_SIZE,
2175 },
2176 .dev = &memory,
2177 .reg_udccs = &UDCCS0,
2178 .reg_uddr = &UDDR0,
2179 },
2180
2181 /* first group of endpoints */
2182 .ep[1] = {
2183 .ep = {
2184 .name = "ep1in-bulk",
2185 .ops = &pxa2xx_ep_ops,
2186 .maxpacket = BULK_FIFO_SIZE,
2187 },
2188 .dev = &memory,
2189 .fifo_size = BULK_FIFO_SIZE,
2190 .bEndpointAddress = USB_DIR_IN | 1,
2191 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2192 .reg_udccs = &UDCCS1,
2193 .reg_uddr = &UDDR1,
2194 drcmr (25)
2195 },
2196 .ep[2] = {
2197 .ep = {
2198 .name = "ep2out-bulk",
2199 .ops = &pxa2xx_ep_ops,
2200 .maxpacket = BULK_FIFO_SIZE,
2201 },
2202 .dev = &memory,
2203 .fifo_size = BULK_FIFO_SIZE,
2204 .bEndpointAddress = 2,
2205 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2206 .reg_udccs = &UDCCS2,
2207 .reg_ubcr = &UBCR2,
2208 .reg_uddr = &UDDR2,
2209 drcmr (26)
2210 },
2211#ifndef CONFIG_USB_PXA2XX_SMALL
2212 .ep[3] = {
2213 .ep = {
2214 .name = "ep3in-iso",
2215 .ops = &pxa2xx_ep_ops,
2216 .maxpacket = ISO_FIFO_SIZE,
2217 },
2218 .dev = &memory,
2219 .fifo_size = ISO_FIFO_SIZE,
2220 .bEndpointAddress = USB_DIR_IN | 3,
2221 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2222 .reg_udccs = &UDCCS3,
2223 .reg_uddr = &UDDR3,
2224 drcmr (27)
2225 },
2226 .ep[4] = {
2227 .ep = {
2228 .name = "ep4out-iso",
2229 .ops = &pxa2xx_ep_ops,
2230 .maxpacket = ISO_FIFO_SIZE,
2231 },
2232 .dev = &memory,
2233 .fifo_size = ISO_FIFO_SIZE,
2234 .bEndpointAddress = 4,
2235 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2236 .reg_udccs = &UDCCS4,
2237 .reg_ubcr = &UBCR4,
2238 .reg_uddr = &UDDR4,
2239 drcmr (28)
2240 },
2241 .ep[5] = {
2242 .ep = {
2243 .name = "ep5in-int",
2244 .ops = &pxa2xx_ep_ops,
2245 .maxpacket = INT_FIFO_SIZE,
2246 },
2247 .dev = &memory,
2248 .fifo_size = INT_FIFO_SIZE,
2249 .bEndpointAddress = USB_DIR_IN | 5,
2250 .bmAttributes = USB_ENDPOINT_XFER_INT,
2251 .reg_udccs = &UDCCS5,
2252 .reg_uddr = &UDDR5,
2253 },
2254
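/* the second and third endpoint groups below repeat the same layout as
 * the first: bulk in/out, iso in/out, then an interrupt-in endpoint.
 */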
2255 /* second group of endpoints */
2256 .ep[6] = {
2257 .ep = {
2258 .name = "ep6in-bulk",
2259 .ops = &pxa2xx_ep_ops,
2260 .maxpacket = BULK_FIFO_SIZE,
2261 },
2262 .dev = &memory,
2263 .fifo_size = BULK_FIFO_SIZE,
2264 .bEndpointAddress = USB_DIR_IN | 6,
2265 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2266 .reg_udccs = &UDCCS6,
2267 .reg_uddr = &UDDR6,
2268 drcmr (30)
2269 },
2270 .ep[7] = {
2271 .ep = {
2272 .name = "ep7out-bulk",
2273 .ops = &pxa2xx_ep_ops,
2274 .maxpacket = BULK_FIFO_SIZE,
2275 },
2276 .dev = &memory,
2277 .fifo_size = BULK_FIFO_SIZE,
2278 .bEndpointAddress = 7,
2279 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2280 .reg_udccs = &UDCCS7,
2281 .reg_ubcr = &UBCR7,
2282 .reg_uddr = &UDDR7,
2283 drcmr (31)
2284 },
2285 .ep[8] = {
2286 .ep = {
2287 .name = "ep8in-iso",
2288 .ops = &pxa2xx_ep_ops,
2289 .maxpacket = ISO_FIFO_SIZE,
2290 },
2291 .dev = &memory,
2292 .fifo_size = ISO_FIFO_SIZE,
2293 .bEndpointAddress = USB_DIR_IN | 8,
2294 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2295 .reg_udccs = &UDCCS8,
2296 .reg_uddr = &UDDR8,
2297 drcmr (32)
2298 },
2299 .ep[9] = {
2300 .ep = {
2301 .name = "ep9out-iso",
2302 .ops = &pxa2xx_ep_ops,
2303 .maxpacket = ISO_FIFO_SIZE,
2304 },
2305 .dev = &memory,
2306 .fifo_size = ISO_FIFO_SIZE,
2307 .bEndpointAddress = 9,
2308 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2309 .reg_udccs = &UDCCS9,
2310 .reg_ubcr = &UBCR9,
2311 .reg_uddr = &UDDR9,
2312 drcmr (33)
2313 },
2314 .ep[10] = {
2315 .ep = {
2316 .name = "ep10in-int",
2317 .ops = &pxa2xx_ep_ops,
2318 .maxpacket = INT_FIFO_SIZE,
2319 },
2320 .dev = &memory,
2321 .fifo_size = INT_FIFO_SIZE,
2322 .bEndpointAddress = USB_DIR_IN | 10,
2323 .bmAttributes = USB_ENDPOINT_XFER_INT,
2324 .reg_udccs = &UDCCS10,
2325 .reg_uddr = &UDDR10,
2326 },
2327
2328 /* third group of endpoints */
2329 .ep[11] = {
2330 .ep = {
2331 .name = "ep11in-bulk",
2332 .ops = &pxa2xx_ep_ops,
2333 .maxpacket = BULK_FIFO_SIZE,
2334 },
2335 .dev = &memory,
2336 .fifo_size = BULK_FIFO_SIZE,
2337 .bEndpointAddress = USB_DIR_IN | 11,
2338 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2339 .reg_udccs = &UDCCS11,
2340 .reg_uddr = &UDDR11,
2341 drcmr (35)
2342 },
2343 .ep[12] = {
2344 .ep = {
2345 .name = "ep12out-bulk",
2346 .ops = &pxa2xx_ep_ops,
2347 .maxpacket = BULK_FIFO_SIZE,
2348 },
2349 .dev = &memory,
2350 .fifo_size = BULK_FIFO_SIZE,
2351 .bEndpointAddress = 12,
2352 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2353 .reg_udccs = &UDCCS12,
2354 .reg_ubcr = &UBCR12,
2355 .reg_uddr = &UDDR12,
2356 drcmr (36)
2357 },
2358 .ep[13] = {
2359 .ep = {
2360 .name = "ep13in-iso",
2361 .ops = &pxa2xx_ep_ops,
2362 .maxpacket = ISO_FIFO_SIZE,
2363 },
2364 .dev = &memory,
2365 .fifo_size = ISO_FIFO_SIZE,
2366 .bEndpointAddress = USB_DIR_IN | 13,
2367 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2368 .reg_udccs = &UDCCS13,
2369 .reg_uddr = &UDDR13,
2370 drcmr (37)
2371 },
2372 .ep[14] = {
2373 .ep = {
2374 .name = "ep14out-iso",
2375 .ops = &pxa2xx_ep_ops,
2376 .maxpacket = ISO_FIFO_SIZE,
2377 },
2378 .dev = &memory,
2379 .fifo_size = ISO_FIFO_SIZE,
2380 .bEndpointAddress = 14,
2381 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2382 .reg_udccs = &UDCCS14,
2383 .reg_ubcr = &UBCR14,
2384 .reg_uddr = &UDDR14,
2385 drcmr (38)
2386 },
2387 .ep[15] = {
2388 .ep = {
2389 .name = "ep15in-int",
2390 .ops = &pxa2xx_ep_ops,
2391 .maxpacket = INT_FIFO_SIZE,
2392 },
2393 .dev = &memory,
2394 .fifo_size = INT_FIFO_SIZE,
2395 .bEndpointAddress = USB_DIR_IN | 15,
2396 .bmAttributes = USB_ENDPOINT_XFER_INT,
2397 .reg_udccs = &UDCCS15,
2398 .reg_uddr = &UDDR15,
2399 },
2400#endif /* !CONFIG_USB_PXA2XX_SMALL */
2401};
2402
2403#define CP15R0_VENDOR_MASK 0xffffe000
2404
2405#if defined(CONFIG_ARCH_PXA)
2406#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2407
2408#elif defined(CONFIG_ARCH_IXP4XX)
2409#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2410
2411#endif
2412
2413#define CP15R0_PROD_MASK 0x000003f0
2414#define PXA25x 0x00000100 /* and PXA26x */
2415#define PXA210 0x00000120
2416
2417#define CP15R0_REV_MASK 0x0000000f
2418
2419#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2420
2421#define PXA255_A0 0x00000106 /* or PXA260_B1 */
2422#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2423#define PXA250_B2 0x00000104
2424#define PXA250_B1 0x00000103 /* or PXA260_A0 */
2425#define PXA250_B0 0x00000102
2426#define PXA250_A1 0x00000101
2427#define PXA250_A0 0x00000100
2428
2429#define PXA210_C0 0x00000125
2430#define PXA210_B2 0x00000124
2431#define PXA210_B1 0x00000123
2432#define PXA210_B0 0x00000122
2433#define IXP425_A0 0x000001c1
2434#define IXP465_AD 0x00000200
2435
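/* probe() reads the CP15 ID register and matches it against the values
 * above; the UDC workarounds (has_cfr, usable OUT dma) depend on the
 * exact chip stepping.
 */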
2436/*
2437 * probe - binds to the platform device
2438 */
2439static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2440{
2441 struct pxa2xx_udc *dev = &memory;
2442 int retval, out_dma = 1;
2443 u32 chiprev;
2444
2445 /* insist on Intel/ARM/XScale */
2446 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2447 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2448 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2449 return -ENODEV;
2450 }
2451
2452 /* trigger chiprev-specific logic */
2453 switch (chiprev & CP15R0_PRODREV_MASK) {
2454#if defined(CONFIG_ARCH_PXA)
2455 case PXA255_A0:
2456 dev->has_cfr = 1;
2457 break;
2458 case PXA250_A0:
2459 case PXA250_A1:
2460 /* A0/A1 "not released"; ep 13, 15 unusable */
2461 /* fall through */
2462 case PXA250_B2: case PXA210_B2:
2463 case PXA250_B1: case PXA210_B1:
2464 case PXA250_B0: case PXA210_B0:
2465 out_dma = 0;
2466 /* fall through */
2467 case PXA250_C0: case PXA210_C0:
2468 break;
2469#elif defined(CONFIG_ARCH_IXP4XX)
2470 case IXP425_A0:
2471 case IXP465_AD:
2472 dev->has_cfr = 1;
2473 out_dma = 0;
2474 break;
2475#endif
2476 default:
2477 out_dma = 0;
2478 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2479 driver_name, chiprev);
2480 /* iop3xx, ixp4xx, ... */
2481 return -ENODEV;
2482 }
2483
2484 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2485 dev->has_cfr ? "" : " (!cfr)",
2486 out_dma ? "" : " (broken dma-out)",
2487 SIZE_STR DMASTR
2488 );
2489
2490#ifdef USE_DMA
2491#ifndef USE_OUT_DMA
2492 out_dma = 0;
2493#endif
2494 /* pxa 250 erratum 130 prevents using OUT dma (fixed in C0) */
2495 if (!out_dma) {
2496 DMSG("disabled OUT dma\n");
2497 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2498 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2499 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2500 }
2501#endif
2502
2503 /* other non-static parts of init */
2504 dev->dev = &pdev->dev;
2505 dev->mach = pdev->dev.platform_data;
2506
2507 init_timer(&dev->timer);
2508 dev->timer.function = udc_watchdog;
2509 dev->timer.data = (unsigned long) dev;
2510
2511 device_initialize(&dev->gadget.dev);
2512 dev->gadget.dev.parent = &pdev->dev;
2513 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2514
2515 the_controller = dev;
2516 platform_set_drvdata(pdev, dev);
2517
2518 udc_disable(dev);
2519 udc_reinit(dev);
2520
2521 dev->vbus = is_vbus_present();
2522
2523 /* irq setup after old hardware state is cleaned up */
2524 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
2525 SA_INTERRUPT, driver_name, dev);
2526 if (retval != 0) {
2527 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2528 driver_name, IRQ_USB, retval);
2529 return -EBUSY;
2530 }
2531 dev->got_irq = 1;
2532
2533#ifdef CONFIG_ARCH_LUBBOCK
2534 if (machine_is_lubbock()) {
2535 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2536 lubbock_vbus_irq,
2537 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2538 driver_name, dev);
2539 if (retval != 0) {
2540 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2541 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2542lubbock_fail0:
2543 free_irq(IRQ_USB, dev);
2544 return -EBUSY;
2545 }
2546 retval = request_irq(LUBBOCK_USB_IRQ,
2547 lubbock_vbus_irq,
2548 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2549 driver_name, dev);
2550 if (retval != 0) {
2551 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2552 driver_name, LUBBOCK_USB_IRQ, retval);
2553 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2554 goto lubbock_fail0;
2555 }
2556#ifdef DEBUG
2557 /* with U-Boot (but not BLOB), hex is off by default */
2558 HEX_DISPLAY(dev->stats.irqs);
2559 LUB_DISC_BLNK_LED &= 0xff;
2560#endif
2561 }
2562#endif
2563 create_proc_files();
2564
2565 return 0;
2566}
2567
2568static void pxa2xx_udc_shutdown(struct platform_device *_dev)
2569{
2570 pullup_off();
2571}
2572
2573static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2574{
2575 struct pxa2xx_udc *dev = platform_get_drvdata(pdev);
2576
2577 udc_disable(dev);
2578 remove_proc_files();
2579 usb_gadget_unregister_driver(dev->driver);
2580
2581 if (dev->got_irq) {
2582 free_irq(IRQ_USB, dev);
2583 dev->got_irq = 0;
2584 }
2585#ifdef CONFIG_ARCH_LUBBOCK
2586 if (machine_is_lubbock()) {
2587 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2588 free_irq(LUBBOCK_USB_IRQ, dev);
2589 }
2590#endif
2591 platform_set_drvdata(pdev, NULL);
2592 the_controller = NULL;
2593 return 0;
2594}
2595
2596/*-------------------------------------------------------------------------*/
2597
2598#ifdef CONFIG_PM
2599
2600/* USB suspend (controlled by the host) and system suspend (controlled
2601 * by the PXA) don't necessarily work well together. If USB is active,
2602 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2603 * mode, or any deeper PM saving state.
2604 *
2605 * For now, we punt and forcibly disconnect from the USB host when PXA
2606 * enters any suspend state. While we're disconnected, we always disable
2607 * the 48 MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2608 * Boards without software pullup control shouldn't use those states.
2609 * VBUS IRQs should probably be ignored so that the PXA device just acts
2610 * "dead" to USB hosts until system resume.
2611 */
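/* Boards that control the D+ pullup from a GPIO normally supply a
 * udc_command() hook in the machine info handed to this driver
 * (dev->mach->udc_command); pxa2xx_udc_suspend() below warns when that
 * hook is missing.  Illustrative sketch only -- the GPIO name and the
 * command constants are assumptions, check the board support code and
 * the machine-info header for the real ones:
 *
 *	static void board_udc_command(int cmd)
 *	{
 *		switch (cmd) {
 *		case PXA2XX_UDC_CMD_CONNECT:
 *			GPSR(GPIO_USB_PULLUP) = GPIO_bit(GPIO_USB_PULLUP);
 *			break;
 *		case PXA2XX_UDC_CMD_DISCONNECT:
 *			GPCR(GPIO_USB_PULLUP) = GPIO_bit(GPIO_USB_PULLUP);
 *			break;
 *		}
 *	}
 */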
2612static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
2613{
2614 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2615
2616 if (!udc->mach->udc_command)
2617 WARN("USB host won't detect disconnect!\n");
2618 pullup(udc, 0);
2619
2620 return 0;
2621}
2622
2623static int pxa2xx_udc_resume(struct platform_device *dev)
2624{
2625 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2626
2627 pullup(udc, 1);
2628
2629 return 0;
2630}
2631
2632#else
2633#define pxa2xx_udc_suspend NULL
2634#define pxa2xx_udc_resume NULL
2635#endif
2636
2637/*-------------------------------------------------------------------------*/
2638
2639static struct platform_driver udc_driver = {
2640 .probe = pxa2xx_udc_probe,
2641 .shutdown = pxa2xx_udc_shutdown,
2642 .remove = __exit_p(pxa2xx_udc_remove),
2643 .suspend = pxa2xx_udc_suspend,
2644 .resume = pxa2xx_udc_resume,
2645 .driver = {
2646 .owner = THIS_MODULE,
2647 .name = "pxa2xx-udc",
2648 },
2649};
2650
2651static int __init udc_init(void)
2652{
2653 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2654 return platform_driver_register(&udc_driver);
2655}
2656module_init(udc_init);
2657
2658static void __exit udc_exit(void)
2659{
2660 platform_driver_unregister(&udc_driver);
2661}
2662module_exit(udc_exit);
2663
2664MODULE_DESCRIPTION(DRIVER_DESC);
2665MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2666MODULE_LICENSE("GPL");
2667