/*
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>

#include "../core/hcd.h"

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>


/*-------------------------------------------------------------------------*/

/*
 * EHCI hc_driver implementation ... experimental, incomplete.
 * Based on the final 1.0 register interface specification.
 *
 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
 * First was PCMCIA, like ISA; then CardBus, which is PCI.
 * Next comes "CardBay", using USB 2.0 signals.
 *
 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
 * Special thanks to Intel and VIA for providing host controllers to
 * test this driver on, and Cypress (including In-System Design) for
 * providing early devices for those host controllers to talk to!
 *
 * HISTORY:
 *
 * 2004-05-10 Root hub and PCI suspend/resume support; remote wakeup. (db)
 * 2004-02-24 Replace pci_* with generic dma_* API calls (dsaxena@plexity.net)
 * 2003-12-29 Rewritten high speed iso transfer support (by Michal Sojka,
 *	<sojkam@centrum.cz>, updates by DB).
 *
 * 2002-11-29	Correct handling for hw async_next register.
 * 2002-08-06	Handling for bulk and interrupt transfers is mostly shared;
 *	only scheduling is different, no arbitrary limitations.
 * 2002-07-25	Sanity check PCI reads, mostly for better cardbus support,
 *	clean up HC run state handshaking.
 * 2002-05-24	Preliminary FS/LS interrupts, using scheduling shortcuts
 * 2002-05-11	Clear TT errors for FS/LS ctrl/bulk.  Fill in some other
 *	missing pieces:  enabling 64bit dma, handoff from BIOS/SMM.
 * 2002-05-07	Some error path cleanups to report better errors; wmb();
 *	use non-CVS version id; better iso bandwidth claim.
 * 2002-04-19	Control/bulk/interrupt submit no longer uses giveback() on
 *	errors in submit path.  Bugfixes to interrupt scheduling/processing.
 * 2002-03-05	Initial high-speed ISO support; reduce ITD memory; shift
 *	more checking to generic hcd framework (db).  Make it work with
 *	Philips EHCI; reduce PCI traffic; shorten IRQ path (Rory Bolt).
 * 2002-01-14	Minor cleanup; version synch.
 * 2002-01-08	Fix roothub handoff of FS/LS to companion controllers.
 * 2002-01-04	Control/Bulk queuing behaves.
 *
 * 2001-12-12	Initial patch version for Linux 2.5.1 kernel.
 * 2001-June	Works with usb-storage and NEC EHCI on 2.4
 */

#define DRIVER_VERSION "10 Dec 2004"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"

static const char	hcd_name [] = "ehci_hcd";


#undef EHCI_VERBOSE_DEBUG
#undef EHCI_URB_TRACE

#ifdef DEBUG
#define EHCI_STATS
#endif

/* magic numbers that can affect system performance */
#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
#define	EHCI_TUNE_RL_TT		0
#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
#define	EHCI_TUNE_MULT_TT	1
#define	EHCI_TUNE_FLS		2	/* (small) 256 frame schedule */

#define EHCI_IAA_JIFFIES	(HZ/100)	/* arbitrary; ~10 msec */
#define EHCI_IO_JIFFIES		(HZ/10)		/* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES	(HZ/20)		/* async idle timeout */
#define EHCI_SHRINK_JIFFIES	(HZ/200)	/* async qh unlink delay */

/* Initial IRQ latency:  faster than hw default */
static int log2_irq_thresh = 0;		// 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* initial park setting:  slower than hw default */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");

#define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)

/*-------------------------------------------------------------------------*/

#include "ehci.h"
#include "ehci-dbg.c"

/*-------------------------------------------------------------------------*/

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown:  shutting down the bridge before the devices using it.
 */
static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = readl (ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay (1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/* force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt (struct ehci_hcd *ehci)
{
	u32	temp = readl (&ehci->regs->status);

	/* disable any irqs left enabled by previous code */
	writel (0, &ehci->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl (&ehci->regs->command);
	temp &= ~CMD_RUN;
	writel (temp, &ehci->regs->command);
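	/* the EHCI spec requires the controller to halt within 16
	 * microframes of RUN being cleared; hence the 16 * 125 usec timeout
	 */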
	return handshake (&ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);
}

/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
	u32 __iomem	*reg_ptr;
	u32		tmp;

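	/* offset 0x68 from the operational registers is the USBMODE register
	 * on TDI/ARC cores; setting its CM field to 3 selects host mode
	 */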
	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
	tmp = readl (reg_ptr);
	tmp |= 0x3;
	writel (tmp, reg_ptr);
}

/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset (struct ehci_hcd *ehci)
{
	int	retval;
	u32	command = readl (&ehci->regs->command);

	command |= CMD_RESET;
	dbg_cmd (ehci, "reset", command);
	writel (command, &ehci->regs->command);
	ehci_to_hcd(ehci)->state = HC_STATE_HALT;
	ehci->next_statechange = jiffies;
	retval = handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000);

	if (retval)
		return retval;

	if (ehci_is_TDI(ehci))
		tdi_reset (ehci);

	return retval;
}

/* idle the controller (from running) */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
	u32	temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		BUG ();
#endif

	/* wait for any schedule enables/disables to take effect */
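	/* CMD_ASE/CMD_PSE (bits 5:4) line up with STS_ASS/STS_PSS (bits
	 * 15:14) after a left shift by 10, so the enables predict the
	 * status bits we expect to see
	 */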
	temp = readl (&ehci->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl (&ehci->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel (temp, &ehci->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return;
	}
}

/*-------------------------------------------------------------------------*/

static void ehci_work(struct ehci_hcd *ehci, struct pt_regs *regs);

#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"

/*-------------------------------------------------------------------------*/

static void ehci_watchdog (unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (ehci->reclaim) {
		u32		status = readl (&ehci->regs->status);

		if (status & STS_IAA) {
			ehci_vdbg (ehci, "lost IAA\n");
			COUNT (ehci->stats.lost_iaa);
			writel (STS_IAA, &ehci->regs->status);
			ehci->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
		start_unlink_async (ehci, ehci->async);

	/* ehci could run by timer, without IRQs ... */
	ehci_work (ehci, NULL);

	spin_unlock_irqrestore (&ehci->lock, flags);
}

/* Reboot notifiers kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static int
ehci_reboot (struct notifier_block *self, unsigned long code, void *null)
{
	struct ehci_hcd		*ehci;

	ehci = container_of (self, struct ehci_hcd, reboot_notifier);
	(void) ehci_halt (ehci);

	/* make BIOS/etc use companion controller during reboot */
	writel (0, &ehci->regs->configured_flag);
	return 0;
}

static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
{
	unsigned port;

	if (!HCS_PPC (ehci->hcs_params))
		return;

	ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
		(void) ehci_hub_control(ehci_to_hcd(ehci),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
	msleep(20);
}

/*-------------------------------------------------------------------------*/

/*
 * ehci_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping ehci->lock.
 */
static void ehci_work (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	timer_action_done (ehci, TIMER_IO_WATCHDOG);
	if (ehci->reclaim_ready)
		end_unlink_async (ehci, regs);

	/* another CPU may drop ehci->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (ehci->scanning)
		return;
	ehci->scanning = 1;
	scan_async (ehci, regs);
	if (ehci->next_uframe != -1)
		scan_periodic (ehci, regs);
	ehci->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
			(ehci->async->qh_next.ptr != NULL ||
			 ehci->periodic_sched != 0))
		timer_action (ehci, TIMER_IO_WATCHDOG);
}

static void ehci_stop (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);

	ehci_dbg (ehci, "stop\n");

	/* Turn off port power on all root hub ports. */
	ehci_port_power (ehci, 0);

	/* no more interrupts ... */
	del_timer_sync (&ehci->watchdog);

	spin_lock_irq(&ehci->lock);
	if (HC_IS_RUNNING (hcd->state))
		ehci_quiesce (ehci);

	ehci_reset (ehci);
	writel (0, &ehci->regs->intr_enable);
	spin_unlock_irq(&ehci->lock);

	/* let companion controllers work when we aren't */
	writel (0, &ehci->regs->configured_flag);
	unregister_reboot_notifier (&ehci->reboot_notifier);

	remove_debug_files (ehci);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq (&ehci->lock);
	if (ehci->async)
		ehci_work (ehci, NULL);
	spin_unlock_irq (&ehci->lock);
	ehci_mem_cleanup (ehci);

#ifdef	EHCI_STATS
	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
		ehci->stats.lost_iaa);
	ehci_dbg (ehci, "complete %ld unlink %ld\n",
		ehci->stats.complete, ehci->stats.unlink);
#endif

	dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status));
}

/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	u32			temp;
	int			retval;
	u32			hcc_params;

	spin_lock_init(&ehci->lock);

	init_timer(&ehci->watchdog);
	ehci->watchdog.function = ehci_watchdog;
	ehci->watchdog.data = (unsigned long) ehci;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl(&ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
		ehci->i_thresh = 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	ehci->reclaim = NULL;
	ehci->reclaim_ready = 0;
	ehci->next_uframe = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	ehci->async->qh_next.qh = NULL;
	ehci->async->hw_next = QH_NEXT(ehci->async->qh_dma);
	ehci->async->hw_info1 = cpu_to_le32(QH_HEAD);
	ehci->async->hw_token = cpu_to_le32(QTD_STS_HALT);
	ehci->async->hw_qtd_next = EHCI_LIST_END;
	ehci->async->qh_state = QH_STATE_LINKED;
	ehci->async->hw_alt_next = QTD_NEXT(ehci->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
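	/* the interrupt threshold field is USBCMD bits 23:16, so this sets
	 * it to 2^log2_irq_thresh micro-frames between interrupts
	 */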
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * cause problems:  throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_dbg(ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0: ehci->periodic_size = 1024; break;
		case 1: ehci->periodic_size = 512; break;
		case 2: ehci->periodic_size = 256; break;
		default:	BUG();
		}
	}
	ehci->command = temp;

	ehci->reboot_notifier.notifier_call = ehci_reboot;
	register_reboot_notifier(&ehci->reboot_notifier);

	return 0;
}

/* start HC running; it's halted, ehci_init() has been run (once) */
static int ehci_run (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	int			retval;
	u32			temp;
	u32			hcc_params;

	/* EHCI spec section 4.1 */
	if ((retval = ehci_reset(ehci)) != 0) {
		unregister_reboot_notifier(&ehci->reboot_notifier);
		ehci_mem_cleanup(ehci);
		return retval;
	}
	writel(ehci->periodic_dma, &ehci->regs->frame_list);
	writel((u32)ehci->async->qh_dma, &ehci->regs->async_next);

	/*
	 * hcc_params controls whether ehci->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	hcc_params = readl(&ehci->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params)) {
		writel(0, &ehci->regs->segment);
#if 0
		// this is deeply broken on almost all architectures
		if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
			ehci_info(ehci, "enabled 64bit DMA\n");
#endif
	}


	// Philips, Intel, and maybe others need CMD_RUN before the
	// root hub will detect new devices (why?); NEC doesn't
	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
	ehci->command |= CMD_RUN;
	writel (ehci->command, &ehci->regs->command);
	dbg_cmd (ehci, "init", ehci->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	hcd->state = HC_STATE_RUNNING;
	writel (FLAG_CF, &ehci->regs->configured_flag);
	readl (&ehci->regs->command);	/* unblock posted writes */

	temp = HC_VERSION(readl (&ehci->caps->hc_capbase));
	ehci_info (ehci,
		"USB %x.%x started, EHCI %x.%02x, driver %s\n",
		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
		temp >> 8, temp & 0xff, DRIVER_VERSION);

	writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */

	/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it in init()
	 * since the class device isn't created that early.
	 */
	create_debug_files(ehci);
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 562 |  | 
|  | 563 | return 0; | 
|  | 564 | } | 
|  | 565 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 566 | /*-------------------------------------------------------------------------*/ | 
|  | 567 |  | 
|  | 568 | static irqreturn_t ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs) | 
|  | 569 | { | 
|  | 570 | struct ehci_hcd		*ehci = hcd_to_ehci (hcd); | 
|  | 571 | u32			status; | 
|  | 572 | int			bh; | 
|  | 573 |  | 
|  | 574 | spin_lock (&ehci->lock); | 
|  | 575 |  | 
|  | 576 | status = readl (&ehci->regs->status); | 
|  | 577 |  | 
|  | 578 | /* e.g. cardbus physical eject */ | 
|  | 579 | if (status == ~(u32) 0) { | 
|  | 580 | ehci_dbg (ehci, "device removed\n"); | 
|  | 581 | goto dead; | 
|  | 582 | } | 
|  | 583 |  | 
|  | 584 | status &= INTR_MASK; | 
|  | 585 | if (!status) {			/* irq sharing? */ | 
|  | 586 | spin_unlock(&ehci->lock); | 
|  | 587 | return IRQ_NONE; | 
|  | 588 | } | 
|  | 589 |  | 
|  | 590 | /* clear (just) interrupts */ | 
|  | 591 | writel (status, &ehci->regs->status); | 
|  | 592 | readl (&ehci->regs->command);	/* unblock posted write */ | 
|  | 593 | bh = 0; | 
|  | 594 |  | 
|  | 595 | #ifdef	EHCI_VERBOSE_DEBUG | 
|  | 596 | /* unrequested/ignored: Frame List Rollover */ | 
|  | 597 | dbg_status (ehci, "irq", status); | 
|  | 598 | #endif | 
|  | 599 |  | 
|  | 600 | /* INT, ERR, and IAA interrupt rates can be throttled */ | 
|  | 601 |  | 
|  | 602 | /* normal [4.15.1.2] or error [4.15.1.1] completion */ | 
|  | 603 | if (likely ((status & (STS_INT|STS_ERR)) != 0)) { | 
|  | 604 | if (likely ((status & STS_ERR) == 0)) | 
|  | 605 | COUNT (ehci->stats.normal); | 
|  | 606 | else | 
|  | 607 | COUNT (ehci->stats.error); | 
|  | 608 | bh = 1; | 
|  | 609 | } | 
|  | 610 |  | 
|  | 611 | /* complete the unlinking of some qh [4.15.2.3] */ | 
|  | 612 | if (status & STS_IAA) { | 
|  | 613 | COUNT (ehci->stats.reclaim); | 
|  | 614 | ehci->reclaim_ready = 1; | 
|  | 615 | bh = 1; | 
|  | 616 | } | 
|  | 617 |  | 
|  | 618 | /* remote wakeup [4.3.1] */ | 
| David Brownell | d97cc2f | 2005-12-22 17:05:18 -0800 | [diff] [blame] | 619 | if (status & STS_PCD) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 620 | unsigned	i = HCS_N_PORTS (ehci->hcs_params); | 
|  | 621 |  | 
|  | 622 | /* resume root hub? */ | 
|  | 623 | status = readl (&ehci->regs->command); | 
|  | 624 | if (!(status & CMD_RUN)) | 
|  | 625 | writel (status | CMD_RUN, &ehci->regs->command); | 
|  | 626 |  | 
|  | 627 | while (i--) { | 
| David Brownell | b972b68 | 2006-06-30 02:34:42 -0700 | [diff] [blame] | 628 | int pstatus = readl (&ehci->regs->port_status [i]); | 
|  | 629 |  | 
|  | 630 | if (pstatus & PORT_OWNER) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 631 | continue; | 
| David Brownell | b972b68 | 2006-06-30 02:34:42 -0700 | [diff] [blame] | 632 | if (!(pstatus & PORT_RESUME) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 633 | || ehci->reset_done [i] != 0) | 
|  | 634 | continue; | 
|  | 635 |  | 
|  | 636 | /* start 20 msec resume signaling from this port, | 
|  | 637 | * and make khubd collect PORT_STAT_C_SUSPEND to | 
|  | 638 | * stop that signaling. | 
|  | 639 | */ | 
|  | 640 | ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 641 | ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); | 
| David Brownell | f03c17f | 2005-11-23 15:45:28 -0800 | [diff] [blame] | 642 | usb_hcd_resume_root_hub(hcd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 643 | } | 
|  | 644 | } | 
|  | 645 |  | 
|  | 646 | /* PCI errors [4.15.2.4] */ | 
|  | 647 | if (unlikely ((status & STS_FATAL) != 0)) { | 
|  | 648 | /* bogus "fatal" IRQs appear on some chips... why?  */ | 
|  | 649 | status = readl (&ehci->regs->status); | 
|  | 650 | dbg_cmd (ehci, "fatal", readl (&ehci->regs->command)); | 
|  | 651 | dbg_status (ehci, "fatal", status); | 
|  | 652 | if (status & STS_HALT) { | 
|  | 653 | ehci_err (ehci, "fatal error\n"); | 
|  | 654 | dead: | 
|  | 655 | ehci_reset (ehci); | 
|  | 656 | writel (0, &ehci->regs->configured_flag); | 
|  | 657 | /* generic layer kills/unlinks all urbs, then | 
|  | 658 | * uses ehci_stop to clean up the rest | 
|  | 659 | */ | 
|  | 660 | bh = 1; | 
|  | 661 | } | 
|  | 662 | } | 
|  | 663 |  | 
|  | 664 | if (bh) | 
|  | 665 | ehci_work (ehci, regs); | 
|  | 666 | spin_unlock (&ehci->lock); | 
|  | 667 | return IRQ_HANDLED; | 
|  | 668 | } | 
|  | 669 |  | 
|  | 670 | /*-------------------------------------------------------------------------*/ | 
|  | 671 |  | 
|  | 672 | /* | 
|  | 673 | * non-error returns are a promise to giveback() the urb later | 
|  | 674 | * we drop ownership so next owner (or urb unlink) can get it | 
|  | 675 | * | 
|  | 676 | * urb + dev is in hcd.self.controller.urb_list | 
|  | 677 | * we're queueing TDs onto software and hardware lists | 
|  | 678 | * | 
|  | 679 | * hcd-specific init for hcpriv hasn't been done yet | 
|  | 680 | * | 
|  | 681 | * NOTE:  control, bulk, and interrupt share the same code to append TDs | 
|  | 682 | * to a (possibly active) QH, and the same QH scanning code. | 
|  | 683 | */ | 
|  | 684 | static int ehci_urb_enqueue ( | 
|  | 685 | struct usb_hcd	*hcd, | 
|  | 686 | struct usb_host_endpoint *ep, | 
|  | 687 | struct urb	*urb, | 
| Al Viro | 55016f1 | 2005-10-21 03:21:58 -0400 | [diff] [blame] | 688 | gfp_t		mem_flags | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 689 | ) { | 
|  | 690 | struct ehci_hcd		*ehci = hcd_to_ehci (hcd); | 
|  | 691 | struct list_head	qtd_list; | 
|  | 692 |  | 
|  | 693 | INIT_LIST_HEAD (&qtd_list); | 
|  | 694 |  | 
|  | 695 | switch (usb_pipetype (urb->pipe)) { | 
|  | 696 | // case PIPE_CONTROL: | 
|  | 697 | // case PIPE_BULK: | 
|  | 698 | default: | 
|  | 699 | if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) | 
|  | 700 | return -ENOMEM; | 
|  | 701 | return submit_async (ehci, ep, urb, &qtd_list, mem_flags); | 
|  | 702 |  | 
|  | 703 | case PIPE_INTERRUPT: | 
|  | 704 | if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) | 
|  | 705 | return -ENOMEM; | 
|  | 706 | return intr_submit (ehci, ep, urb, &qtd_list, mem_flags); | 
|  | 707 |  | 
|  | 708 | case PIPE_ISOCHRONOUS: | 
|  | 709 | if (urb->dev->speed == USB_SPEED_HIGH) | 
|  | 710 | return itd_submit (ehci, urb, mem_flags); | 
|  | 711 | else | 
|  | 712 | return sitd_submit (ehci, urb, mem_flags); | 
|  | 713 | } | 
|  | 714 | } | 
|  | 715 |  | 
|  | 716 | static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | 
|  | 717 | { | 
|  | 718 | /* if we need to use IAA and it's busy, defer */ | 
|  | 719 | if (qh->qh_state == QH_STATE_LINKED | 
|  | 720 | && ehci->reclaim | 
|  | 721 | && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) { | 
|  | 722 | struct ehci_qh		*last; | 
|  | 723 |  | 
|  | 724 | for (last = ehci->reclaim; | 
|  | 725 | last->reclaim; | 
|  | 726 | last = last->reclaim) | 
|  | 727 | continue; | 
|  | 728 | qh->qh_state = QH_STATE_UNLINK_WAIT; | 
|  | 729 | last->reclaim = qh; | 
|  | 730 |  | 
|  | 731 | /* bypass IAA if the hc can't care */ | 
|  | 732 | } else if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim) | 
|  | 733 | end_unlink_async (ehci, NULL); | 
|  | 734 |  | 
|  | 735 | /* something else might have unlinked the qh by now */ | 
|  | 736 | if (qh->qh_state == QH_STATE_LINKED) | 
|  | 737 | start_unlink_async (ehci, qh); | 
|  | 738 | } | 
|  | 739 |  | 
|  | 740 | /* remove from hardware lists | 
|  | 741 | * completions normally happen asynchronously | 
|  | 742 | */ | 
|  | 743 |  | 
|  | 744 | static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) | 
|  | 745 | { | 
|  | 746 | struct ehci_hcd		*ehci = hcd_to_ehci (hcd); | 
|  | 747 | struct ehci_qh		*qh; | 
|  | 748 | unsigned long		flags; | 
|  | 749 |  | 
|  | 750 | spin_lock_irqsave (&ehci->lock, flags); | 
|  | 751 | switch (usb_pipetype (urb->pipe)) { | 
|  | 752 | // case PIPE_CONTROL: | 
|  | 753 | // case PIPE_BULK: | 
|  | 754 | default: | 
|  | 755 | qh = (struct ehci_qh *) urb->hcpriv; | 
|  | 756 | if (!qh) | 
|  | 757 | break; | 
|  | 758 | unlink_async (ehci, qh); | 
|  | 759 | break; | 
|  | 760 |  | 
|  | 761 | case PIPE_INTERRUPT: | 
|  | 762 | qh = (struct ehci_qh *) urb->hcpriv; | 
|  | 763 | if (!qh) | 
|  | 764 | break; | 
|  | 765 | switch (qh->qh_state) { | 
|  | 766 | case QH_STATE_LINKED: | 
|  | 767 | intr_deschedule (ehci, qh); | 
|  | 768 | /* FALL THROUGH */ | 
|  | 769 | case QH_STATE_IDLE: | 
|  | 770 | qh_completions (ehci, qh, NULL); | 
|  | 771 | break; | 
|  | 772 | default: | 
|  | 773 | ehci_dbg (ehci, "bogus qh %p state %d\n", | 
|  | 774 | qh, qh->qh_state); | 
|  | 775 | goto done; | 
|  | 776 | } | 
|  | 777 |  | 
|  | 778 | /* reschedule QH iff another request is queued */ | 
|  | 779 | if (!list_empty (&qh->qtd_list) | 
|  | 780 | && HC_IS_RUNNING (hcd->state)) { | 
|  | 781 | int status; | 
|  | 782 |  | 
|  | 783 | status = qh_schedule (ehci, qh); | 
|  | 784 | spin_unlock_irqrestore (&ehci->lock, flags); | 
|  | 785 |  | 
|  | 786 | if (status != 0) { | 
|  | 787 | // shouldn't happen often, but ... | 
|  | 788 | // FIXME kill those tds' urbs | 
|  | 789 | err ("can't reschedule qh %p, err %d", | 
|  | 790 | qh, status); | 
|  | 791 | } | 
|  | 792 | return status; | 
|  | 793 | } | 
|  | 794 | break; | 
|  | 795 |  | 
|  | 796 | case PIPE_ISOCHRONOUS: | 
|  | 797 | // itd or sitd ... | 
|  | 798 |  | 
|  | 799 | // wait till next completion, do it then. | 
|  | 800 | // completion irqs can wait up to 1024 msec, | 
|  | 801 | break; | 
|  | 802 | } | 
|  | 803 | done: | 
|  | 804 | spin_unlock_irqrestore (&ehci->lock, flags); | 
|  | 805 | return 0; | 
|  | 806 | } | 
|  | 807 |  | 
|  | 808 | /*-------------------------------------------------------------------------*/ | 
|  | 809 |  | 
|  | 810 | // bulk qh holds the data toggle | 
|  | 811 |  | 
|  | 812 | static void | 
|  | 813 | ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep) | 
|  | 814 | { | 
|  | 815 | struct ehci_hcd		*ehci = hcd_to_ehci (hcd); | 
|  | 816 | unsigned long		flags; | 
|  | 817 | struct ehci_qh		*qh, *tmp; | 
|  | 818 |  | 
|  | 819 | /* ASSERT:  any requests/urbs are being unlinked */ | 
|  | 820 | /* ASSERT:  nobody can be submitting urbs for this any more */ | 
|  | 821 |  | 
|  | 822 | rescan: | 
|  | 823 | spin_lock_irqsave (&ehci->lock, flags); | 
|  | 824 | qh = ep->hcpriv; | 
|  | 825 | if (!qh) | 
|  | 826 | goto done; | 
|  | 827 |  | 
|  | 828 | /* endpoints can be iso streams.  for now, we don't | 
|  | 829 | * accelerate iso completions ... so spin a while. | 
|  | 830 | */ | 
|  | 831 | if (qh->hw_info1 == 0) { | 
|  | 832 | ehci_vdbg (ehci, "iso delay\n"); | 
|  | 833 | goto idle_timeout; | 
|  | 834 | } | 
|  | 835 |  | 
|  | 836 | if (!HC_IS_RUNNING (hcd->state)) | 
|  | 837 | qh->qh_state = QH_STATE_IDLE; | 
|  | 838 | switch (qh->qh_state) { | 
|  | 839 | case QH_STATE_LINKED: | 
|  | 840 | for (tmp = ehci->async->qh_next.qh; | 
|  | 841 | tmp && tmp != qh; | 
|  | 842 | tmp = tmp->qh_next.qh) | 
|  | 843 | continue; | 
|  | 844 | /* periodic qh self-unlinks on empty */ | 
|  | 845 | if (!tmp) | 
|  | 846 | goto nogood; | 
|  | 847 | unlink_async (ehci, qh); | 
|  | 848 | /* FALL THROUGH */ | 
|  | 849 | case QH_STATE_UNLINK:		/* wait for hw to finish? */ | 
|  | 850 | idle_timeout: | 
|  | 851 | spin_unlock_irqrestore (&ehci->lock, flags); | 
| Nishanth Aravamudan | 22c4386 | 2005-08-15 11:30:11 -0700 | [diff] [blame] | 852 | schedule_timeout_uninterruptible(1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | goto rescan; | 
|  | 854 | case QH_STATE_IDLE:		/* fully unlinked */ | 
|  | 855 | if (list_empty (&qh->qtd_list)) { | 
|  | 856 | qh_put (qh); | 
|  | 857 | break; | 
|  | 858 | } | 
|  | 859 | /* else FALL THROUGH */ | 
|  | 860 | default: | 
|  | 861 | nogood: | 
|  | 862 | /* caller was supposed to have unlinked any requests; | 
|  | 863 | * that's not our job.  just leak this memory. | 
|  | 864 | */ | 
|  | 865 | ehci_err (ehci, "qh %p (#%02x) state %d%s\n", | 
|  | 866 | qh, ep->desc.bEndpointAddress, qh->qh_state, | 
|  | 867 | list_empty (&qh->qtd_list) ? "" : "(has tds)"); | 
|  | 868 | break; | 
|  | 869 | } | 
|  | 870 | ep->hcpriv = NULL; | 
|  | 871 | done: | 
|  | 872 | spin_unlock_irqrestore (&ehci->lock, flags); | 
|  | 873 | return; | 
|  | 874 | } | 
|  | 875 |  | 
| Matt Porter | 7ff71d6 | 2005-09-22 22:31:15 -0700 | [diff] [blame] | 876 | static int ehci_get_frame (struct usb_hcd *hcd) | 
|  | 877 | { | 
|  | 878 | struct ehci_hcd		*ehci = hcd_to_ehci (hcd); | 
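	/* FRINDEX counts micro-frames; bits 13:3 hold the 1 msec frame number */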
	return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size;
}

/*-------------------------------------------------------------------------*/

#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC

MODULE_DESCRIPTION (DRIVER_INFO);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");

#ifdef CONFIG_PCI
#include "ehci-pci.c"
#define	PCI_DRIVER		ehci_pci_driver
#endif

#ifdef CONFIG_MPC834x
#include "ehci-fsl.c"
#define	PLATFORM_DRIVER		ehci_fsl_driver
#endif

#ifdef CONFIG_SOC_AU1200
#include "ehci-au1xxx.c"
#define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
#endif

#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER)
#error "missing bus glue for ehci-hcd"
#endif

static int __init ehci_hcd_init(void)
{
	int retval = 0;

	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
		 hcd_name,
		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		return retval;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0) {
#ifdef PLATFORM_DRIVER
		platform_driver_unregister(&PLATFORM_DRIVER);
#endif
	}
#endif

	return retval;
}
module_init(ehci_hcd_init);

static void __exit ehci_hcd_cleanup(void)
{
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
#endif
}
module_exit(ehci_hcd_cleanup);
