/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - Still no traffic scheduling code to make NAKing for bulk or control
 *   transfers unable to starve other requests; or to make efficient use
 *   of hardware with periodic transfers.  (Note that network drivers
 *   commonly post bulk reads that stay pending for a long time; these
 *   would make very visible trouble.)
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 *
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it ... one remote device may easily be NAKing while others
 * need to perform transfers in that same direction.  The same thing could
 * be done in software though, assuming dma cooperates.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int nOut,
			u8 *buf, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (retries-- < 1) {
			ERR("Could not flush host TX fifo: csr: %04x\n", csr);
			return;
		}
		mdelay(1);
	}
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
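		/* ep0 has no TXCSR; queue the SETUP packet through CSR0 */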
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}

static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

/*
 * Start the URB at the front of an endpoint's queue;
 * the hardware endpoint must already be claimed by the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void			*buf;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		hw_ep->out_qh = qh;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		buf = urb->transfer_buffer;
		len = urb->transfer_buffer_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		qh->iso_idx = 0;
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			cppi_host_txdma_start(hw_ep);
	}
}

/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (urb->status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

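	/* unlink while still holding the lock, then drop it around the
	 * completion callback, which may resubmit urbs to this hcd
	 */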
	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* for bulk/interrupt endpoints only */
static inline void
musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
	struct usb_device	*udev = urb->dev;
	u16			csr;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*qh;

	/* FIXME:  the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in || ep->is_shared_fifo)
		qh = ep->in_qh;
	else
		qh = ep->out_qh;

	if (!is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		usb_settoggle(udev, qh->epnum, 1,
			(csr & MUSB_TXCSR_H_DATATOGGLE)
				? 1 : 0);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		usb_settoggle(udev, qh->epnum, 0,
			(csr & MUSB_RXCSR_H_DATATOGGLE)
				? 1 : 0);
	}
}

/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	int			is_in;
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			ready = qh->is_ready;

	if (ep->is_shared_fifo)
		is_in = 1;
	else
		is_in = usb_pipein(urb->pipe);

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

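	/* keep completion callbacks from restarting this endpoint
	 * while the urb is being given back
	 */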
	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			musb->periodic[ep->epnum] = NULL;
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}

/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh	*qh;

	if (is_in || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

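	/* a status still reading -EINPROGRESS means the urb finished normally */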
	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			musb_writew(ep->regs, MUSB_TXCSR,
					MUSB_TXCSR_FRCDATATOG);
		}
		/* clear mode (and everything else) to enable Rx */
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
			qh->addr_reg);
		musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
			qh->h_addr_reg);
		musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
			qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}


/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh;
	u16			packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev,
					qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			/* twice in case of double packet buffering */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_writew(epio, MUSB_CSR0,
				csr | MUSB_CSR0_FLUSHFIFO);
			musb_writew(epio, MUSB_CSR0,
				csr | MUSB_CSR0_FLUSHFIFO);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_writeb(mbase,
				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
				qh->addr_reg);
			musb_writeb(mbase,
				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
				qh->h_addr_reg);
			musb_writeb(mbase,
				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
				qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
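			/* for bulk splitting, the high MAXP bits hold the
			 * per-transfer packet count minus one
			 */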
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			qh->segsize = min(len, dma_channel->max_len);

			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;


			if (dma_channel->desired_mode == 0) {
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE);
				csr |= (MUSB_TXCSR_DMAENAB);
					/* against programming guide */
			} else
				csr |= (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);

			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
			 * defer enabling dma.
			 */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling:  can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_AUTOSET);
			/* write CSR */
			csr |= MUSB_TXCSR_MODE;

			if (epnum)
				musb_writew(epio, MUSB_TXCSR, csr);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}


/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
					- urb->actual_length)));
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min(qh->maxpacket, ((u16)
				(urb->transfer_buffer_length
				- urb->actual_length)));

		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d bytes to %p\n",
					fifo_count, fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
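			/* IN: stop requesting packets, then clear the NAK timeout */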
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			csr |= MUSB_CSR0_FLUSHFIFO;
			musb_writew(epio, MUSB_CSR0, csr);
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, 0);

		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			wLength = 0;
	u8			*buf = NULL;
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	urb = next_urb(qh);

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE:  this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		goto finish;
	}

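	/* on a fault, stop any DMA in flight, flush the fifo, and clear the
	 * error bits before the urb is completed below
	 */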
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;

	}

	/* REVISIT this looks wrong... */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			wLength = dma->actual_len;
		else
			wLength = qh->segsize;
		qh->offset += wLength;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = qh->segsize;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				buf = urb->transfer_buffer + d->offset;
				wLength = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				buf = urb->transfer_buffer
						+ qh->offset;
				wLength = urb->transfer_buffer_length
						- qh->offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);

	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
		/* WARN_ON(!buf); */

		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
		 * (and presumably, fifo is not half-full) we should write TWO
		 * packets before updating TXCSR ... other docs disagree ...
		 */
		/* PIO:  start next packet in this URB */
		wLength = min(qh->maxpacket, (u16) wLength);
		musb_write_fifo(hw_ep, wLength, buf);
		qh->segsize = wLength;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
	} else
		DBG(1, "not complete, but dma enabled?\n");

finish:
	return;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			/* NOTE this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) rx urb
			 * that could use this fifo.  (dma complicates it...)
			 *
			 * if (bulk && qh->ring.next != &musb->in_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			DBG(6, "RX end %d NAK timeout\n", epnum);
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS
					| MUSB_RXCSR_H_REQPKT);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
 | 1509 | 		musb_writew(hw_ep->regs, MUSB_RXCSR, val); | 
 | 1510 |  | 
 | 1511 | #ifdef CONFIG_USB_INVENTRA_DMA | 
| Ajay Kumar Gupta | f82a689 | 2008-10-29 15:10:31 +0200 | [diff] [blame] | 1512 | 		if (usb_pipeisoc(pipe)) { | 
 | 1513 | 			struct usb_iso_packet_descriptor *d; | 
 | 1514 |  | 
 | 1515 | 			d = urb->iso_frame_desc + qh->iso_idx; | 
 | 1516 | 			d->actual_length = xfer_len; | 
 | 1517 |  | 
 | 1518 | 			/* even if there was an error, we did the dma | 
 | 1519 | 			 * for iso_frame_desc->length | 
 | 1520 | 			 */ | 
 | 1521 | 			if (d->status != -EILSEQ && d->status != -EOVERFLOW) | 
 | 1522 | 				d->status = 0; | 
 | 1523 |  | 
 | 1524 | 			if (++qh->iso_idx >= urb->number_of_packets) | 
 | 1525 | 				done = true; | 
 | 1526 | 			else | 
 | 1527 | 				done = false; | 
 | 1528 |  | 
 | 1529 | 		} else { | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1530 | 			/* done if urb buffer is full or short packet is recd */ | 
 | 1531 | 			done = (urb->actual_length + xfer_len >= | 
 | 1532 | 					urb->transfer_buffer_length | 
 | 1533 | 				|| dma->actual_len < qh->maxpacket); | 
| Ajay Kumar Gupta | f82a689 | 2008-10-29 15:10:31 +0200 | [diff] [blame] | 1534 | 		} | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1535 |  | 
 | 1536 | 		/* send IN token for next packet, without AUTOREQ */ | 
 | 1537 | 		if (!done) { | 
 | 1538 | 			val |= MUSB_RXCSR_H_REQPKT; | 
 | 1539 | 			musb_writew(epio, MUSB_RXCSR, | 
 | 1540 | 				MUSB_RXCSR_H_WZC_BITS | val); | 
 | 1541 | 		} | 
 | 1542 |  | 
 | 1543 | 		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | 
 | 1544 | 			done ? "off" : "reset", | 
 | 1545 | 			musb_readw(epio, MUSB_RXCSR), | 
 | 1546 | 			musb_readw(epio, MUSB_RXCOUNT)); | 
 | 1547 | #else | 
 | 1548 | 		done = true; | 
 | 1549 | #endif | 
 | 1550 | 	} else if (urb->status == -EINPROGRESS) { | 
 | 1551 | 		/* if no errors, be sure a packet is ready for unloading */ | 
 | 1552 | 		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { | 
 | 1553 | 			status = -EPROTO; | 
 | 1554 | 			ERR("Rx interrupt with no errors or packet!\n"); | 
 | 1555 |  | 
 | 1556 | 			/* FIXME this is another "SHOULD NEVER HAPPEN" */ | 
 | 1557 |  | 
 | 1558 | /* SCRUB (RX) */ | 
 | 1559 | 			/* do the proper sequence to abort the transfer */ | 
 | 1560 | 			musb_ep_select(mbase, epnum); | 
 | 1561 | 			val &= ~MUSB_RXCSR_H_REQPKT; | 
 | 1562 | 			musb_writew(epio, MUSB_RXCSR, val); | 
 | 1563 | 			goto finish; | 
 | 1564 | 		} | 
 | 1565 |  | 
 | 1566 | 		/* we are expecting IN packets */ | 
 | 1567 | #ifdef CONFIG_USB_INVENTRA_DMA | 
 | 1568 | 		if (dma) { | 
 | 1569 | 			struct dma_controller	*c; | 
 | 1570 | 			u16			rx_count; | 
| Ajay Kumar Gupta | f82a689 | 2008-10-29 15:10:31 +0200 | [diff] [blame] | 1571 | 			int			ret, length; | 
 | 1572 | 			dma_addr_t		buf; | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1573 |  | 
 | 1574 | 			rx_count = musb_readw(epio, MUSB_RXCOUNT); | 
 | 1575 |  | 
 | 1576 | 			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", | 
 | 1577 | 					epnum, rx_count, | 
 | 1578 | 					urb->transfer_dma | 
 | 1579 | 						+ urb->actual_length, | 
 | 1580 | 					qh->offset, | 
 | 1581 | 					urb->transfer_buffer_length); | 
 | 1582 |  | 
 | 1583 | 			c = musb->dma_controller; | 
 | 1584 |  | 
| Ajay Kumar Gupta | f82a689 | 2008-10-29 15:10:31 +0200 | [diff] [blame] | 1585 | 			if (usb_pipeisoc(pipe)) { | 
 | 1586 | 				int status = 0; | 
 | 1587 | 				struct usb_iso_packet_descriptor *d; | 
 | 1588 |  | 
 | 1589 | 				d = urb->iso_frame_desc + qh->iso_idx; | 
 | 1590 |  | 
 | 1591 | 				if (iso_err) { | 
 | 1592 | 					status = -EILSEQ; | 
 | 1593 | 					urb->error_count++; | 
 | 1594 | 				} | 
 | 1595 | 				if (rx_count > d->length) { | 
 | 1596 | 					if (status == 0) { | 
 | 1597 | 						status = -EOVERFLOW; | 
 | 1598 | 						urb->error_count++; | 
 | 1599 | 					} | 
 | 1600 | 					DBG(2, "** OVERFLOW %d into %d\n", | 
 | 1601 | 					    rx_count, d->length); | 
 | 1602 |  | 
 | 1603 | 					length = d->length; | 
 | 1604 | 				} else | 
 | 1605 | 					length = rx_count; | 
 | 1606 | 				d->status = status; | 
 | 1607 | 				buf = urb->transfer_dma + d->offset; | 
 | 1608 | 			} else { | 
 | 1609 | 				length = rx_count; | 
 | 1610 | 				buf = urb->transfer_dma + | 
 | 1611 | 						urb->actual_length; | 
 | 1612 | 			} | 
 | 1613 |  | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1614 | 			dma->desired_mode = 0; | 
 | 1615 | #ifdef USE_MODE1 | 
 | 1616 | 			/* because of the issue below, mode 1 will | 
 | 1617 | 			 * only rarely behave with correct semantics. | 
 | 1618 | 			 */ | 
 | 1619 | 			if ((urb->transfer_flags & | 
 | 1620 | 						URB_SHORT_NOT_OK) | 
 | 1621 | 				&& (urb->transfer_buffer_length - | 
 | 1622 | 						urb->actual_length) | 
 | 1623 | 					> qh->maxpacket) | 
 | 1624 | 				dma->desired_mode = 1; | 
| Ajay Kumar Gupta | f82a689 | 2008-10-29 15:10:31 +0200 | [diff] [blame] | 1625 | 			if (rx_count < hw_ep->max_packet_sz_rx) { | 
 | 1626 | 				length = rx_count; | 
 | 1627 | 				dma->desired_mode = 0; | 
 | 1628 | 			} else { | 
 | 1629 | 				length = urb->transfer_buffer_length; | 
 | 1630 | 			} | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1631 | #endif | 
 | 1632 |  | 
 | 1633 | /* Disadvantage of using mode 1: | 
 | 1634 |  *	It's basically usable only for mass storage class; essentially all | 
 | 1635 |  *	other protocols also terminate transfers on short packets. | 
 | 1636 |  * | 
 | 1637 |  * Details: | 
 | 1638 |  *	An extra IN token is sent at the end of the transfer (due to AUTOREQ) | 
 | 1639 |  *	If you try to use mode 1 for (transfer_buffer_length - 512), and try | 
 | 1640 |  *	to use the extra IN token to grab the last packet using mode 0, then | 
 | 1641 |  *	the problem is that you cannot be sure when the device will send the | 
 | 1642 |  *	last packet and set RxPktRdy. Sometimes the packet is recd too soon, | 
 | 1643 |  *	such that it gets lost when RxCSR is re-set at the end of the mode 1 | 
 | 1644 |  *	transfer, while sometimes it is recd just a little late, so that if you | 
 | 1645 |  *	try to configure for mode 0 soon after the mode 1 transfer is | 
 | 1646 |  *	completed, you will find rxcount 0. You might think of waiting for an | 
 | 1647 |  *	interrupt when the pkt is recd; unfortunately, you won't get one! | 
 | 1648 |  */ | 
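
/* Worked example of the idea above (a sketch only; this driver keeps
 * desired_mode 0): for a 4096 byte usb-storage read with 512 byte packets,
 * mode 1 DMA would be programmed for 4096 - 512 = 3584 bytes (seven packets,
 * with AUTOREQ), and the final packet would then be collected in mode 0.
 * The race described above is in exactly that hand-off.
 */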
 | 1649 |  | 
 | 1650 | 			val = musb_readw(epio, MUSB_RXCSR); | 
 | 1651 | 			val &= ~MUSB_RXCSR_H_REQPKT; | 
 | 1652 |  | 
 | 1653 | 			if (dma->desired_mode == 0) | 
 | 1654 | 				val &= ~MUSB_RXCSR_H_AUTOREQ; | 
 | 1655 | 			else | 
 | 1656 | 				val |= MUSB_RXCSR_H_AUTOREQ; | 
 | 1657 | 			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; | 
 | 1658 |  | 
 | 1659 | 			musb_writew(epio, MUSB_RXCSR, | 
 | 1660 | 				MUSB_RXCSR_H_WZC_BITS | val); | 
 | 1661 |  | 
 | 1662 | 			/* REVISIT: when actual_length != 0, | 
 | 1663 | 			 * transfer_buffer_length needs to be | 
 | 1664 | 			 * adjusted first... | 
 | 1665 | 			 */ | 
 | 1666 | 			ret = c->channel_program( | 
 | 1667 | 				dma, qh->maxpacket, | 
| Ajay Kumar Gupta | f82a689 | 2008-10-29 15:10:31 +0200 | [diff] [blame] | 1668 | 				dma->desired_mode, buf, length); | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1669 |  | 
 | 1670 | 			if (!ret) { | 
 | 1671 | 				c->channel_release(dma); | 
 | 1672 | 				hw_ep->rx_channel = NULL; | 
 | 1673 | 				dma = NULL; | 
 | 1674 | 				/* REVISIT reset CSR */ | 
 | 1675 | 			} | 
 | 1676 | 		} | 
 | 1677 | #endif	/* Mentor DMA */ | 
 | 1678 |  | 
 | 1679 | 		if (!dma) { | 
 | 1680 | 			done = musb_host_packet_rx(musb, urb, | 
 | 1681 | 					epnum, iso_err); | 
 | 1682 | 			DBG(6, "read %spacket\n", done ? "last " : ""); | 
 | 1683 | 		} | 
 | 1684 | 	} | 
 | 1685 |  | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1686 | finish: | 
 | 1687 | 	urb->actual_length += xfer_len; | 
 | 1688 | 	qh->offset += xfer_len; | 
 | 1689 | 	if (done) { | 
 | 1690 | 		if (urb->status == -EINPROGRESS) | 
 | 1691 | 			urb->status = status; | 
 | 1692 | 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); | 
 | 1693 | 	} | 
 | 1694 | } | 
 | 1695 |  | 
 | 1696 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. | 
 | 1697 |  * the software schedule associates multiple such nodes with a given | 
 | 1698 |  * host side hardware endpoint + direction; scheduling may activate | 
 | 1699 |  * that hardware endpoint. | 
 | 1700 |  */ | 
 | 1701 | static int musb_schedule( | 
 | 1702 | 	struct musb		*musb, | 
 | 1703 | 	struct musb_qh		*qh, | 
 | 1704 | 	int			is_in) | 
 | 1705 | { | 
 | 1706 | 	int			idle; | 
 | 1707 | 	int			best_diff; | 
 | 1708 | 	int			best_end, epnum; | 
 | 1709 | 	struct musb_hw_ep	*hw_ep = NULL; | 
 | 1710 | 	struct list_head	*head = NULL; | 
 | 1711 |  | 
 | 1712 | 	/* use fixed hardware for control and bulk */ | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 1713 | 	if (qh->type == USB_ENDPOINT_XFER_CONTROL) { | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1714 | 		head = &musb->control; | 
 | 1715 | 		hw_ep = musb->control_ep; | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1716 | 		goto success; | 
 | 1717 | 	} | 
 | 1718 |  | 
 | 1719 | 	/* else, periodic transfers get muxed to other endpoints */ | 
 | 1720 |  | 
 | 1721 | 	/* FIXME this doesn't consider direction, so it can only | 
 | 1722 | 	 * work for one half of the endpoint hardware, and assumes | 
 | 1723 | 	 * the previous cases handled all non-shared endpoints... | 
 | 1724 | 	 */ | 
 | 1725 |  | 
 | 1726 | 	/* we know this qh hasn't been scheduled, so all we need to do | 
 | 1727 | 	 * is choose which hardware endpoint to put it on ... | 
 | 1728 | 	 * | 
 | 1729 | 	 * REVISIT what we really want here is a regular schedule tree | 
 | 1730 | 	 * like e.g. OHCI uses, but for now musb->periodic is just an | 
 | 1731 | 	 * array of the _single_ logical endpoint associated with a | 
 | 1732 | 	 * given physical one (identity mapping logical->physical). | 
 | 1733 | 	 * | 
 | 1734 | 	 * that simplistic approach makes TT scheduling a lot simpler; | 
 | 1735 | 	 * there is none, and thus none of its complexity... | 
 | 1736 | 	 */ | 
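	/*
	 * Illustration (fifo sizes are hypothetical): with qh->maxpacket = 64
	 * and free endpoints whose relevant fifos hold 512, 64 and 1024 bytes,
	 * the loop below ends up with the 64 byte endpoint (diff == 0), i.e.
	 * the tightest fit that can still hold a full packet.
	 */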
 | 1737 | 	best_diff = 4096; | 
 | 1738 | 	best_end = -1; | 
 | 1739 |  | 
 | 1740 | 	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { | 
 | 1741 | 		int	diff; | 
 | 1742 |  | 
 | 1743 | 		if (musb->periodic[epnum]) | 
 | 1744 | 			continue; | 
 | 1745 | 		hw_ep = &musb->endpoints[epnum]; | 
 | 1746 | 		if (hw_ep == musb->bulk_ep) | 
 | 1747 | 			continue; | 
 | 1748 |  | 
 | 1749 | 		if (is_in) | 
 | 1750 | 			diff = hw_ep->max_packet_sz_rx - qh->maxpacket; | 
 | 1751 | 		else | 
 | 1752 | 			diff = hw_ep->max_packet_sz_tx - qh->maxpacket; | 
 | 1753 |  | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 1754 | 		if (diff >= 0 && best_diff > diff) { | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1755 | 			best_diff = diff; | 
 | 1756 | 			best_end = epnum; | 
 | 1757 | 		} | 
 | 1758 | 	} | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 1759 | 	/* use bulk reserved ep1 if no other ep is free */ | 
 | 1760 | 	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { | 
 | 1761 | 		hw_ep = musb->bulk_ep; | 
 | 1762 | 		if (is_in) | 
 | 1763 | 			head = &musb->in_bulk; | 
 | 1764 | 		else | 
 | 1765 | 			head = &musb->out_bulk; | 
 | 1766 | 		goto success; | 
 | 1767 | 	} else if (best_end < 0) { | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1768 | 		return -ENOSPC; | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 1769 | 	} | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1770 |  | 
 | 1771 | 	idle = 1; | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 1772 | 	qh->mux = 0; | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1773 | 	hw_ep = musb->endpoints + best_end; | 
 | 1774 | 	musb->periodic[best_end] = qh; | 
 | 1775 | 	DBG(4, "qh %p periodic slot %d\n", qh, best_end); | 
 | 1776 | success: | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 1777 | 	if (head) { | 
 | 1778 | 		idle = list_empty(head); | 
 | 1779 | 		list_add_tail(&qh->ring, head); | 
 | 1780 | 		qh->mux = 1; | 
 | 1781 | 	} | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1782 | 	qh->hw_ep = hw_ep; | 
 | 1783 | 	qh->hep->hcpriv = qh; | 
 | 1784 | 	if (idle) | 
 | 1785 | 		musb_start_urb(musb, is_in, qh); | 
 | 1786 | 	return 0; | 
 | 1787 | } | 
 | 1788 |  | 
 | 1789 | static int musb_urb_enqueue( | 
 | 1790 | 	struct usb_hcd			*hcd, | 
 | 1791 | 	struct urb			*urb, | 
 | 1792 | 	gfp_t				mem_flags) | 
 | 1793 | { | 
 | 1794 | 	unsigned long			flags; | 
 | 1795 | 	struct musb			*musb = hcd_to_musb(hcd); | 
 | 1796 | 	struct usb_host_endpoint	*hep = urb->ep; | 
 | 1797 | 	struct musb_qh			*qh = hep->hcpriv; | 
 | 1798 | 	struct usb_endpoint_descriptor	*epd = &hep->desc; | 
 | 1799 | 	int				ret; | 
 | 1800 | 	unsigned			type_reg; | 
 | 1801 | 	unsigned			interval; | 
 | 1802 |  | 
 | 1803 | 	/* host role must be active */ | 
 | 1804 | 	if (!is_host_active(musb) || !musb->is_active) | 
 | 1805 | 		return -ENODEV; | 
 | 1806 |  | 
 | 1807 | 	spin_lock_irqsave(&musb->lock, flags); | 
 | 1808 | 	ret = usb_hcd_link_urb_to_ep(hcd, urb); | 
 | 1809 | 	spin_unlock_irqrestore(&musb->lock, flags); | 
 | 1810 | 	if (ret) | 
 | 1811 | 		return ret; | 
 | 1812 |  | 
 | 1813 | 	/* DMA mapping was already done, if needed, and this urb is on | 
 | 1814 | 	 * hep->urb_list ... so there's little to do unless hep wasn't | 
 | 1815 | 	 * yet scheduled onto a live qh. | 
 | 1816 | 	 * | 
 | 1817 | 	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets | 
 | 1818 | 	 * disabled, testing for empty qh->ring and avoiding qh setup costs | 
 | 1819 | 	 * except for the first urb queued after a config change. | 
 | 1820 | 	 */ | 
 | 1821 | 	if (qh) { | 
 | 1822 | 		urb->hcpriv = qh; | 
 | 1823 | 		return 0; | 
 | 1824 | 	} | 
 | 1825 |  | 
 | 1826 | 	/* Allocate and initialize qh, minimizing the work done each time | 
 | 1827 | 	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it. | 
 | 1828 | 	 * | 
 | 1829 | 	 * REVISIT consider a dedicated qh kmem_cache, so it's harder | 
 | 1830 | 	 * for bugs in other kernel code to break this driver... | 
 | 1831 | 	 */ | 
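	/*
	 * Such a cache might look roughly like the sketch below (names are
	 * hypothetical, nothing here is wired up):
	 *
	 *	static struct kmem_cache *qh_cachep;
	 *
	 *	qh_cachep = kmem_cache_create("musb_qh", sizeof(struct musb_qh),
	 *			0, SLAB_HWCACHE_ALIGN, NULL);
	 *	qh = kmem_cache_zalloc(qh_cachep, mem_flags);
	 */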
 | 1832 | 	qh = kzalloc(sizeof *qh, mem_flags); | 
 | 1833 | 	if (!qh) { | 
| Ajay Kumar Gupta | 2492e67 | 2008-09-11 11:53:21 +0300 | [diff] [blame] | 1834 | 		spin_lock_irqsave(&musb->lock, flags); | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1835 | 		usb_hcd_unlink_urb_from_ep(hcd, urb); | 
| Ajay Kumar Gupta | 2492e67 | 2008-09-11 11:53:21 +0300 | [diff] [blame] | 1836 | 		spin_unlock_irqrestore(&musb->lock, flags); | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1837 | 		return -ENOMEM; | 
 | 1838 | 	} | 
 | 1839 |  | 
 | 1840 | 	qh->hep = hep; | 
 | 1841 | 	qh->dev = urb->dev; | 
 | 1842 | 	INIT_LIST_HEAD(&qh->ring); | 
 | 1843 | 	qh->is_ready = 1; | 
 | 1844 |  | 
 | 1845 | 	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); | 
 | 1846 |  | 
 | 1847 | 	/* no high bandwidth support yet */ | 
 | 1848 | 	if (qh->maxpacket & ~0x7ff) { | 
 | 1849 | 		ret = -EMSGSIZE; | 
 | 1850 | 		goto done; | 
 | 1851 | 	} | 
 | 1852 |  | 
 | 1853 | 	qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 
 | 1854 | 	qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; | 
 | 1855 |  | 
 | 1856 | 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ | 
 | 1857 | 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe); | 
 | 1858 |  | 
 | 1859 | 	/* precompute rxtype/txtype/type0 register */ | 
 | 1860 | 	type_reg = (qh->type << 4) | qh->epnum; | 
 | 1861 | 	switch (urb->dev->speed) { | 
 | 1862 | 	case USB_SPEED_LOW: | 
 | 1863 | 		type_reg |= 0xc0; | 
 | 1864 | 		break; | 
 | 1865 | 	case USB_SPEED_FULL: | 
 | 1866 | 		type_reg |= 0x80; | 
 | 1867 | 		break; | 
 | 1868 | 	default: | 
 | 1869 | 		type_reg |= 0x40; | 
 | 1870 | 	} | 
 | 1871 | 	qh->type_reg = type_reg; | 
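	/*
	 * Example: a full speed bulk endpoint 2 yields
	 * type_reg = (USB_ENDPOINT_XFER_BULK << 4) | 2 | 0x80
	 *          = 0x20 | 0x02 | 0x80 = 0xa2.
	 */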
 | 1872 |  | 
 | 1873 | 	/* precompute rxinterval/txinterval register */ | 
 | 1874 | 	interval = min((u8)16, epd->bInterval);	/* log encoding */ | 
 | 1875 | 	switch (qh->type) { | 
 | 1876 | 	case USB_ENDPOINT_XFER_INT: | 
 | 1877 | 		/* fullspeed uses linear encoding */ | 
 | 1878 | 		if (USB_SPEED_FULL == urb->dev->speed) { | 
 | 1879 | 			interval = epd->bInterval; | 
 | 1880 | 			if (!interval) | 
 | 1881 | 				interval = 1; | 
 | 1882 | 		} | 
 | 1883 | 		/* FALLTHROUGH */ | 
 | 1884 | 	case USB_ENDPOINT_XFER_ISOC: | 
 | 1885 | 		/* iso always uses log encoding */ | 
 | 1886 | 		break; | 
 | 1887 | 	default: | 
 | 1888 | 		/* REVISIT we actually want to use NAK limits, hinting to the | 
 | 1889 | 		 * transfer scheduling logic to try some other qh, e.g. try | 
 | 1890 | 		 * for 2 msec first: | 
 | 1891 | 		 * | 
 | 1892 | 		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; | 
 | 1893 | 		 * | 
 | 1894 | 		 * The downside of disabling this is that transfer scheduling | 
 | 1895 | 		 * gets VERY unfair for nonperiodic transfers; a misbehaving | 
 | 1896 | 		 * peripheral could make that hurt.  Or for reads, one that's | 
 | 1897 | 		 * perfectly normal:  network and other drivers keep reads | 
 | 1898 | 		 * posted at all times, having one pending for a week should | 
 | 1899 | 		 * be perfectly safe. | 
 | 1900 | 		 * | 
 | 1901 | 		 * The upside of disabling it is that we avoid needing transfer | 
 | 1902 | 		 * scheduling code for a while. | 
 | 1903 | 		 */ | 
 | 1904 | 		interval = 0; | 
 | 1905 | 	} | 
 | 1906 | 	qh->intv_reg = interval; | 
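	/*
	 * Example: a high speed interrupt endpoint with bInterval = 5 keeps
	 * the log encoding (a 2^(5-1) = 16 microframe period per the USB 2.0
	 * rules), while a full speed one with bInterval = 10 uses the linear
	 * value 10 (frames) directly.
	 */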
 | 1907 |  | 
 | 1908 | 	/* precompute addressing for external hub/tt ports */ | 
 | 1909 | 	if (musb->is_multipoint) { | 
 | 1910 | 		struct usb_device	*parent = urb->dev->parent; | 
 | 1911 |  | 
 | 1912 | 		if (parent != hcd->self.root_hub) { | 
 | 1913 | 			qh->h_addr_reg = (u8) parent->devnum; | 
 | 1914 |  | 
 | 1915 | 			/* set up tt info if needed */ | 
 | 1916 | 			if (urb->dev->tt) { | 
 | 1917 | 				qh->h_port_reg = (u8) urb->dev->ttport; | 
| Ajay Kumar Gupta | ae5ad29 | 2008-09-11 11:53:20 +0300 | [diff] [blame] | 1918 | 				if (urb->dev->tt->hub) | 
 | 1919 | 					qh->h_addr_reg = | 
 | 1920 | 						(u8) urb->dev->tt->hub->devnum; | 
 | 1921 | 				if (urb->dev->tt->multi) | 
 | 1922 | 					qh->h_addr_reg |= 0x80; | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1923 | 			} | 
 | 1924 | 		} | 
 | 1925 | 	} | 
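	/*
	 * Example: a full or low speed device at address 5 behind a high speed
	 * hub at address 3, port 2, ends up with addr_reg = 5, h_addr_reg = 3
	 * and h_port_reg = 2; if the hub is multi-TT, the top bit of
	 * h_addr_reg gets set as well.
	 */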
 | 1926 |  | 
 | 1927 | 	/* invariant: hep->hcpriv is null OR the qh that's already scheduled. | 
 | 1928 | 	 * until we get real dma queues (with an entry for each urb/buffer), | 
 | 1929 | 	 * we only have work to do in the former case. | 
 | 1930 | 	 */ | 
 | 1931 | 	spin_lock_irqsave(&musb->lock, flags); | 
 | 1932 | 	if (hep->hcpriv) { | 
 | 1933 | 		/* some concurrent activity submitted another urb to hep... | 
 | 1934 | 		 * odd, rare, error prone, but legal. | 
 | 1935 | 		 */ | 
 | 1936 | 		kfree(qh); | 
 | 1937 | 		ret = 0; | 
 | 1938 | 	} else | 
 | 1939 | 		ret = musb_schedule(musb, qh, | 
 | 1940 | 				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); | 
 | 1941 |  | 
 | 1942 | 	if (ret == 0) { | 
 | 1943 | 		urb->hcpriv = qh; | 
 | 1944 | 		/* FIXME set urb->start_frame for iso/intr, it's tested in | 
 | 1945 | 		 * musb_start_urb(), but otherwise only konicawc cares ... | 
 | 1946 | 		 */ | 
 | 1947 | 	} | 
 | 1948 | 	spin_unlock_irqrestore(&musb->lock, flags); | 
 | 1949 |  | 
 | 1950 | done: | 
 | 1951 | 	if (ret != 0) { | 
| Ajay Kumar Gupta | 2492e67 | 2008-09-11 11:53:21 +0300 | [diff] [blame] | 1952 | 		spin_lock_irqsave(&musb->lock, flags); | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1953 | 		usb_hcd_unlink_urb_from_ep(hcd, urb); | 
| Ajay Kumar Gupta | 2492e67 | 2008-09-11 11:53:21 +0300 | [diff] [blame] | 1954 | 		spin_unlock_irqrestore(&musb->lock, flags); | 
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 1955 | 		kfree(qh); | 
 | 1956 | 	} | 
 | 1957 | 	return ret; | 
 | 1958 | } | 
 | 1959 |  | 
 | 1960 |  | 
 | 1961 | /* | 
 | 1962 |  * abort a transfer that's at the head of a hardware queue. | 
 | 1963 |  * called with controller locked, irqs blocked | 
 | 1964 |  * that hardware queue advances to the next transfer, unless prevented | 
 | 1965 |  */ | 
 | 1966 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | 
 | 1967 | { | 
 | 1968 | 	struct musb_hw_ep	*ep = qh->hw_ep; | 
 | 1969 | 	void __iomem		*epio = ep->regs; | 
 | 1970 | 	unsigned		hw_end = ep->epnum; | 
 | 1971 | 	void __iomem		*regs = ep->musb->mregs; | 
 | 1972 | 	u16			csr; | 
 | 1973 | 	int			status = 0; | 
 | 1974 |  | 
 | 1975 | 	musb_ep_select(regs, hw_end); | 
 | 1976 |  | 
 | 1977 | 	if (is_dma_capable()) { | 
 | 1978 | 		struct dma_channel	*dma; | 
 | 1979 |  | 
 | 1980 | 		dma = is_in ? ep->rx_channel : ep->tx_channel; | 
 | 1981 | 		if (dma) { | 
 | 1982 | 			status = ep->musb->dma_controller->channel_abort(dma); | 
 | 1983 | 			DBG(status ? 1 : 3, | 
 | 1984 | 				"abort %cX%d DMA for urb %p --> %d\n", | 
 | 1985 | 				is_in ? 'R' : 'T', ep->epnum, | 
 | 1986 | 				urb, status); | 
 | 1987 | 			urb->actual_length += dma->actual_len; | 
 | 1988 | 		} | 
 | 1989 | 	} | 
 | 1990 |  | 
 | 1991 | 	/* turn off DMA requests, discard state, stop polling ... */ | 
 | 1992 | 	if (is_in) { | 
 | 1993 | 		/* giveback saves bulk toggle */ | 
 | 1994 | 		csr = musb_h_flush_rxfifo(ep, 0); | 
 | 1995 |  | 
 | 1996 | 		/* REVISIT we still get an irq; should likely clear the | 
 | 1997 | 		 * endpoint's irq status here to avoid bogus irqs. | 
 | 1998 | 		 * clearing that status is platform-specific... | 
 | 1999 | 		 */ | 
 | 2000 | 	} else { | 
 | 2001 | 		musb_h_tx_flush_fifo(ep); | 
 | 2002 | 		csr = musb_readw(epio, MUSB_TXCSR); | 
 | 2003 | 		csr &= ~(MUSB_TXCSR_AUTOSET | 
 | 2004 | 			| MUSB_TXCSR_DMAENAB | 
 | 2005 | 			| MUSB_TXCSR_H_RXSTALL | 
 | 2006 | 			| MUSB_TXCSR_H_NAKTIMEOUT | 
 | 2007 | 			| MUSB_TXCSR_H_ERROR | 
 | 2008 | 			| MUSB_TXCSR_TXPKTRDY); | 
 | 2009 | 		musb_writew(epio, MUSB_TXCSR, csr); | 
 | 2010 | 		/* REVISIT may need to clear FLUSHFIFO ... */ | 
 | 2011 | 		musb_writew(epio, MUSB_TXCSR, csr); | 
 | 2012 | 		/* flush cpu writebuffer */ | 
 | 2013 | 		csr = musb_readw(epio, MUSB_TXCSR); | 
 | 2014 | 	} | 
 | 2015 | 	if (status == 0) | 
 | 2016 | 		musb_advance_schedule(ep->musb, urb, ep, is_in); | 
 | 2017 | 	return status; | 
 | 2018 | } | 
 | 2019 |  | 
 | 2020 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | 
 | 2021 | { | 
 | 2022 | 	struct musb		*musb = hcd_to_musb(hcd); | 
 | 2023 | 	struct musb_qh		*qh; | 
 | 2024 | 	struct list_head	*sched; | 
 | 2025 | 	unsigned long		flags; | 
 | 2026 | 	int			ret; | 
 | 2027 |  | 
 | 2028 | 	DBG(4, "urb=%p, dev%d ep%d%s\n", urb, | 
 | 2029 | 			usb_pipedevice(urb->pipe), | 
 | 2030 | 			usb_pipeendpoint(urb->pipe), | 
 | 2031 | 			usb_pipein(urb->pipe) ? "in" : "out"); | 
 | 2032 |  | 
 | 2033 | 	spin_lock_irqsave(&musb->lock, flags); | 
 | 2034 | 	ret = usb_hcd_check_unlink_urb(hcd, urb, status); | 
 | 2035 | 	if (ret) | 
 | 2036 | 		goto done; | 
 | 2037 |  | 
 | 2038 | 	qh = urb->hcpriv; | 
 | 2039 | 	if (!qh) | 
 | 2040 | 		goto done; | 
 | 2041 |  | 
 | 2042 | 	/* Any URB not actively programmed into endpoint hardware can be | 
 | 2043 | 	 * immediately given back.  Such an URB must be at the head of its | 
 | 2044 | 	 * endpoint queue, unless someday we get real DMA queues.  And even | 
 | 2045 | 	 * then, it might not be known to the hardware... | 
 | 2046 | 	 * | 
 | 2047 | 	 * Otherwise abort current transfer, pending dma, etc.; urb->status | 
 | 2048 | 	 * has already been updated.  This is a synchronous abort; it'd be | 
 | 2049 | 	 * OK to hold off until after some IRQ, though. | 
 | 2050 | 	 */ | 
 | 2051 | 	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) | 
 | 2052 | 		ret = -EINPROGRESS; | 
 | 2053 | 	else { | 
 | 2054 | 		switch (qh->type) { | 
 | 2055 | 		case USB_ENDPOINT_XFER_CONTROL: | 
 | 2056 | 			sched = &musb->control; | 
 | 2057 | 			break; | 
 | 2058 | 		case USB_ENDPOINT_XFER_BULK: | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 2059 | 			if (qh->mux == 1) { | 
 | 2060 | 				if (usb_pipein(urb->pipe)) | 
 | 2061 | 					sched = &musb->in_bulk; | 
 | 2062 | 				else | 
 | 2063 | 					sched = &musb->out_bulk; | 
 | 2064 | 				break; | 
 | 2065 | 			} | 
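			/* FALLTHROUGH: non-multiplexed bulk is treated like periodic */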
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 2066 | 		default: | 
 | 2067 | 			/* REVISIT when we get a schedule tree, periodic | 
 | 2068 | 			 * transfers won't always be at the head of a | 
 | 2069 | 			 * singleton queue... | 
 | 2070 | 			 */ | 
 | 2071 | 			sched = NULL; | 
 | 2072 | 			break; | 
 | 2073 | 		} | 
 | 2074 | 	} | 
 | 2075 |  | 
 | 2076 | 	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */ | 
 | 2077 | 	if (ret < 0 || (sched && qh != first_qh(sched))) { | 
 | 2078 | 		int	ready = qh->is_ready; | 
 | 2079 |  | 
 | 2080 | 		ret = 0; | 
 | 2081 | 		qh->is_ready = 0; | 
 | 2082 | 		__musb_giveback(musb, urb, 0); | 
 | 2083 | 		qh->is_ready = ready; | 
 | 2084 | 	} else | 
 | 2085 | 		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | 
 | 2086 | done: | 
 | 2087 | 	spin_unlock_irqrestore(&musb->lock, flags); | 
 | 2088 | 	return ret; | 
 | 2089 | } | 
 | 2090 |  | 
 | 2091 | /* disable an endpoint */ | 
 | 2092 | static void | 
 | 2093 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | 
 | 2094 | { | 
 | 2095 | 	u8			epnum = hep->desc.bEndpointAddress; | 
 | 2096 | 	unsigned long		flags; | 
 | 2097 | 	struct musb		*musb = hcd_to_musb(hcd); | 
 | 2098 | 	u8			is_in = epnum & USB_DIR_IN; | 
 | 2099 | 	struct musb_qh		*qh = hep->hcpriv; | 
 | 2100 | 	struct urb		*urb, *tmp; | 
 | 2101 | 	struct list_head	*sched; | 
 | 2102 |  | 
 | 2103 | 	if (!qh) | 
 | 2104 | 		return; | 
 | 2105 |  | 
 | 2106 | 	spin_lock_irqsave(&musb->lock, flags); | 
 | 2107 |  | 
 | 2108 | 	switch (qh->type) { | 
 | 2109 | 	case USB_ENDPOINT_XFER_CONTROL: | 
 | 2110 | 		sched = &musb->control; | 
 | 2111 | 		break; | 
 | 2112 | 	case USB_ENDPOINT_XFER_BULK: | 
| Ajay Kumar Gupta | 23d15e0 | 2008-10-29 15:10:35 +0200 | [diff] [blame] | 2113 | 		if (qh->mux == 1) { | 
 | 2114 | 			if (is_in) | 
 | 2115 | 				sched = &musb->in_bulk; | 
 | 2116 | 			else | 
 | 2117 | 				sched = &musb->out_bulk; | 
 | 2118 | 			break; | 
 | 2119 | 		} | 
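		/* FALLTHROUGH: non-multiplexed bulk is treated like periodic */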
| Felipe Balbi | 550a737 | 2008-07-24 12:27:36 +0300 | [diff] [blame] | 2120 | 	default: | 
 | 2121 | 		/* REVISIT when we get a schedule tree, periodic transfers | 
 | 2122 | 		 * won't always be at the head of a singleton queue... | 
 | 2123 | 		 */ | 
 | 2124 | 		sched = NULL; | 
 | 2125 | 		break; | 
 | 2126 | 	} | 
 | 2127 |  | 
 | 2128 | 	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */ | 
 | 2129 |  | 
 | 2130 | 	/* kick first urb off the hardware, if needed */ | 
 | 2131 | 	qh->is_ready = 0; | 
 | 2132 | 	if (!sched || qh == first_qh(sched)) { | 
 | 2133 | 		urb = next_urb(qh); | 
 | 2134 |  | 
 | 2135 | 		/* make software (then hardware) stop ASAP */ | 
 | 2136 | 		if (!urb->unlinked) | 
 | 2137 | 			urb->status = -ESHUTDOWN; | 
 | 2138 |  | 
 | 2139 | 		/* cleanup */ | 
 | 2140 | 		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | 
 | 2141 | 	} else | 
 | 2142 | 		urb = NULL; | 
 | 2143 |  | 
 | 2144 | 	/* then just nuke all the others */ | 
 | 2145 | 	list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) | 
 | 2146 | 		musb_giveback(qh, urb, -ESHUTDOWN); | 
 | 2147 |  | 
 | 2148 | 	spin_unlock_irqrestore(&musb->lock, flags); | 
 | 2149 | } | 
 | 2150 |  | 
 | 2151 | static int musb_h_get_frame_number(struct usb_hcd *hcd) | 
 | 2152 | { | 
 | 2153 | 	struct musb	*musb = hcd_to_musb(hcd); | 
 | 2154 |  | 
 | 2155 | 	return musb_readw(musb->mregs, MUSB_FRAME); | 
 | 2156 | } | 
 | 2157 |  | 
 | 2158 | static int musb_h_start(struct usb_hcd *hcd) | 
 | 2159 | { | 
 | 2160 | 	struct musb	*musb = hcd_to_musb(hcd); | 
 | 2161 |  | 
 | 2162 | 	/* NOTE: musb_start() is called when the hub driver turns | 
 | 2163 | 	 * on port power, or when (OTG) peripheral starts. | 
 | 2164 | 	 */ | 
 | 2165 | 	hcd->state = HC_STATE_RUNNING; | 
 | 2166 | 	musb->port1_status = 0; | 
 | 2167 | 	return 0; | 
 | 2168 | } | 
 | 2169 |  | 
 | 2170 | static void musb_h_stop(struct usb_hcd *hcd) | 
 | 2171 | { | 
 | 2172 | 	musb_stop(hcd_to_musb(hcd)); | 
 | 2173 | 	hcd->state = HC_STATE_HALT; | 
 | 2174 | } | 
 | 2175 |  | 
 | 2176 | static int musb_bus_suspend(struct usb_hcd *hcd) | 
 | 2177 | { | 
 | 2178 | 	struct musb	*musb = hcd_to_musb(hcd); | 
 | 2179 |  | 
 | 2180 | 	if (musb->xceiv.state == OTG_STATE_A_SUSPEND) | 
 | 2181 | 		return 0; | 
 | 2182 |  | 
 | 2183 | 	if (is_host_active(musb) && musb->is_active) { | 
 | 2184 | 		WARNING("trying to suspend as %s is_active=%i\n", | 
 | 2185 | 			otg_state_string(musb), musb->is_active); | 
 | 2186 | 		return -EBUSY; | 
 | 2187 | 	} else | 
 | 2188 | 		return 0; | 
 | 2189 | } | 
 | 2190 |  | 
 | 2191 | static int musb_bus_resume(struct usb_hcd *hcd) | 
 | 2192 | { | 
 | 2193 | 	/* resuming child port does the work */ | 
 | 2194 | 	return 0; | 
 | 2195 | } | 
 | 2196 |  | 
 | 2197 | const struct hc_driver musb_hc_driver = { | 
 | 2198 | 	.description		= "musb-hcd", | 
 | 2199 | 	.product_desc		= "MUSB HDRC host driver", | 
 | 2200 | 	.hcd_priv_size		= sizeof(struct musb), | 
 | 2201 | 	.flags			= HCD_USB2 | HCD_MEMORY, | 
 | 2202 |  | 
 | 2203 | 	/* not using irq handler or reset hooks from usbcore, since | 
 | 2204 | 	 * those must be shared with peripheral code for OTG configs | 
 | 2205 | 	 */ | 
 | 2206 |  | 
 | 2207 | 	.start			= musb_h_start, | 
 | 2208 | 	.stop			= musb_h_stop, | 
 | 2209 |  | 
 | 2210 | 	.get_frame_number	= musb_h_get_frame_number, | 
 | 2211 |  | 
 | 2212 | 	.urb_enqueue		= musb_urb_enqueue, | 
 | 2213 | 	.urb_dequeue		= musb_urb_dequeue, | 
 | 2214 | 	.endpoint_disable	= musb_h_disable, | 
 | 2215 |  | 
 | 2216 | 	.hub_status_data	= musb_hub_status_data, | 
 | 2217 | 	.hub_control		= musb_hub_control, | 
 | 2218 | 	.bus_suspend		= musb_bus_suspend, | 
 | 2219 | 	.bus_resume		= musb_bus_resume, | 
 | 2220 | 	/* .start_port_reset	= NULL, */ | 
 | 2221 | 	/* .hub_irq_enable	= NULL, */ | 
 | 2222 | }; |