/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"


/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but saw USBCV failures
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))

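/*
 * A request's map_state starts out UN_MAPPED.  map_dma_buffer() below
 * moves it to MUSB_MAPPED when this driver performs the dma_map_single()
 * itself, or to PRE_MAPPED when the gadget driver handed in an already
 * DMA-mapped buffer (request.dma valid on entry); unmap_dma_buffer()
 * reverses whichever mapping was done.
 */
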
/* Maps the buffer to dma  */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from dma and map it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	if (!is_buffer_mapped(request))
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
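
/*
 * Note: unmap_dma_buffer() runs before the request's completion callback
 * is invoked (see musb_g_giveback() below), so the gadget driver always
 * sees CPU-coherent data in the buffer it handed us.
 */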

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, completing them with the given
 * status.  Synchronous: the caller has locked the controller, blocked
 * IRQs, and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}
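
/*
 * Each musb_g_giveback() call above removes the request from req_list
 * (via the list_del() in the giveback path), so the while loop is
 * guaranteed to drain the queue and terminate.
 */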

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers.

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |	(data is transferred to the FIFO, then sent out when
		  |	IN token(s) are received from the host)
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
		  |		      -> stop DMA, ~DMAENAB,
		  |		      -> set TxPktRdy for last short pkt or zlp
		  |		      -> Complete Request
		  |		      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif
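
/*
 * txstate() below implements the loop in the diagram above for the
 * Mentor (Inventra) DMA case; the CPPI and TUSB OMAP DMA branches it
 * contains program their engines differently, as noted inline.
 */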

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					if (!musb_ep->hb_mult)
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start. Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	 MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... it has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and later reacquired. During this
			 * window the INDEX register could be changed by
			 * the gadget_queue function, especially on SMP
			 * systems. Reselect the INDEX to be sure we are
			 * reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\      -> RxReady
	  |	      -> if request queued, call rxstate
	  |		/\	-> setup DMA
	  |		|	     -> DMA interrupt on completion
	  |		|		-> RxReady
	  |		|		      -> stop DMA
	  |		|		      -> ack the read
	  |		|		      -> if data received = max expected
	  |		|			   by the request, or host
	  |		|			   sent a short packet,
	  |		|			   complete the request,
	  |		|			   and start the next one.
	  |		|_____________________________________|
	  |					 else just wait for the host
	  |					    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif
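
/*
 * rxstate() below implements the RxReady half of the diagram above.
 * Note that the DMA mode decision is made per packet: mode 1 is only
 * attempted when the gadget set short_not_ok and a full-sized packet
 * is already waiting in the FIFO.
 */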

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
				 * mode 0 only. So we do not get endpoint interrupts due to DMA
				 * completion. We only get interrupts from DMA controller.
				 *
				 * We could operate in DMA mode 1 if we knew the size of the transfer
				 * in advance. For mass storage class, request->length = what the host
				 * sends, so that'd work.  But for pretty much everything else,
				 * request->length is routinely more than what the host sends. For
				 * most of these gadgets, end of transfer is signified either by a
				 * short packet, or filling the last byte of the buffer.  (Sending
				 * extra data in that last packet should trigger an overflow fault.)
				 * But in mode 1, we don't get a DMA completion interrupt for short
				 * packets.
				 *
				 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
				 * to get endpoint interrupt on every DMA req, but that didn't seem
				 * to work reliably.
				 *
				 * REVISIT an updated g_file_storage can set req->short_not_ok, which
				 * then becomes usable as a runtime "use mode 1" hint...
				 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;

				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size = min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			len = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
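
/*
 * Note that fifo_count above is seeded with the endpoint packet size
 * and only overwritten with the RXCOUNT value once RXPKTRDY is seen,
 * so the final short-packet test stays false when no packet was
 * unloaded from the FIFO.
 */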

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and later reacquired. During this window the
		 * INDEX register could be changed by the gadget_queue
		 * function, especially on SMP systems. Reselect the
		 * INDEX to be sure we are reading/modifying the right
		 * registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}
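
/*
 * musb_ep_restart() is invoked with the controller lock held: from
 * musb_gadget_queue() when a request lands at the head of an idle
 * queue, and from musb_gadget_set_halt() when a halt is cleared with
 * a request still pending.
 */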

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
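
/*
 * Requests queued behind the head are not started here; the interrupt
 * path (musb_g_tx()/musb_g_rx()) advances the queue as each transfer
 * completes.
 */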

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
1422
1423/*
1424 * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
1425 * data but will queue requests.
1426 *
1427 * exported to ep0 code
1428 */
Felipe Balbi1b6c3b02009-12-04 15:47:46 +02001429static int musb_gadget_set_halt(struct usb_ep *ep, int value)
Felipe Balbi550a7372008-07-24 12:27:36 +03001430{
1431 struct musb_ep *musb_ep = to_musb_ep(ep);
1432 u8 epnum = musb_ep->current_epnum;
1433 struct musb *musb = musb_ep->musb;
1434 void __iomem *epio = musb->endpoints[epnum].regs;
1435 void __iomem *mbase;
1436 unsigned long flags;
1437 u16 csr;
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001438 struct musb_request *request;
Felipe Balbi550a7372008-07-24 12:27:36 +03001439 int status = 0;
1440
1441 if (!ep)
1442 return -EINVAL;
1443 mbase = musb->mregs;
1444
1445 spin_lock_irqsave(&musb->lock, flags);
1446
1447 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1448 status = -EINVAL;
1449 goto done;
1450 }
1451
1452 musb_ep_select(mbase, epnum);
1453
Felipe Balbiad1adb82011-02-16 12:40:05 +02001454 request = next_request(musb_ep);
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001455 if (value) {
1456 if (request) {
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001457 dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001458 ep->name);
1459 status = -EAGAIN;
1460 goto done;
Felipe Balbi550a7372008-07-24 12:27:36 +03001461 }
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001462 /* Cannot portably stall with non-empty FIFO */
1463 if (musb_ep->is_in) {
1464 csr = musb_readw(epio, MUSB_TXCSR);
1465 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001466 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001467 status = -EAGAIN;
1468 goto done;
1469 }
1470 }
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001471 } else
1472 musb_ep->wedged = 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03001473
1474 /* set/clear the stall and toggle bits */
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001475 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
Felipe Balbi550a7372008-07-24 12:27:36 +03001476 if (musb_ep->is_in) {
1477 csr = musb_readw(epio, MUSB_TXCSR);
Felipe Balbi550a7372008-07-24 12:27:36 +03001478 csr |= MUSB_TXCSR_P_WZC_BITS
1479 | MUSB_TXCSR_CLRDATATOG;
1480 if (value)
1481 csr |= MUSB_TXCSR_P_SENDSTALL;
1482 else
1483 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1484 | MUSB_TXCSR_P_SENTSTALL);
1485 csr &= ~MUSB_TXCSR_TXPKTRDY;
1486 musb_writew(epio, MUSB_TXCSR, csr);
1487 } else {
1488 csr = musb_readw(epio, MUSB_RXCSR);
1489 csr |= MUSB_RXCSR_P_WZC_BITS
1490 | MUSB_RXCSR_FLUSHFIFO
1491 | MUSB_RXCSR_CLRDATATOG;
1492 if (value)
1493 csr |= MUSB_RXCSR_P_SENDSTALL;
1494 else
1495 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1496 | MUSB_RXCSR_P_SENTSTALL);
1497 musb_writew(epio, MUSB_RXCSR, csr);
1498 }
1499
Felipe Balbi550a7372008-07-24 12:27:36 +03001500 /* maybe start the first request in the queue */
1501 if (!musb_ep->busy && !value && request) {
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001502 dev_dbg(musb->controller, "restarting the request\n");
Felipe Balbi550a7372008-07-24 12:27:36 +03001503 musb_ep_restart(musb, request);
1504 }
1505
Sergei Shtylyovcea83242009-11-18 22:51:18 +03001506done:
Felipe Balbi550a7372008-07-24 12:27:36 +03001507 spin_unlock_irqrestore(&musb->lock, flags);
1508 return status;
1509}
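
/*
 * For reference, function drivers reach musb_gadget_set_halt() through
 * the gadget API rather than calling it directly -- a hedged sketch
 * (hypothetical caller, not code from this file):
 *
 *	if (protocol_error)
 *		status = usb_ep_set_halt(ep);
 *
 * The host then sees STALL on that endpoint until it issues
 * CLEAR_FEATURE(ENDPOINT_HALT), which lands back here with value == 0.
 */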
1510
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001511/*
1512 * Sets the halt feature; subsequent clear-halt requests from the host are ignored
1513 */
Felipe Balbi1b6c3b02009-12-04 15:47:46 +02001514static int musb_gadget_set_wedge(struct usb_ep *ep)
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001515{
1516 struct musb_ep *musb_ep = to_musb_ep(ep);
1517
1518 if (!ep)
1519 return -EINVAL;
1520
1521 musb_ep->wedged = 1;
1522
1523 return usb_ep_set_halt(ep);
1524}
1525
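/*
 * Report how many bytes are waiting in an OUT endpoint's FIFO, read
 * from MUSB_RXCOUNT. Only meaningful for an enabled OUT endpoint;
 * anything else returns -EINVAL.
 */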
Felipe Balbi550a7372008-07-24 12:27:36 +03001526static int musb_gadget_fifo_status(struct usb_ep *ep)
1527{
1528 struct musb_ep *musb_ep = to_musb_ep(ep);
1529 void __iomem *epio = musb_ep->hw_ep->regs;
1530 int retval = -EINVAL;
1531
1532 if (musb_ep->desc && !musb_ep->is_in) {
1533 struct musb *musb = musb_ep->musb;
1534 int epnum = musb_ep->current_epnum;
1535 void __iomem *mbase = musb->mregs;
1536 unsigned long flags;
1537
1538 spin_lock_irqsave(&musb->lock, flags);
1539
1540 musb_ep_select(mbase, epnum);
1541 /* FIXME return zero unless RXPKTRDY is set */
1542 retval = musb_readw(epio, MUSB_RXCOUNT);
1543
1544 spin_unlock_irqrestore(&musb->lock, flags);
1545 }
1546 return retval;
1547}
1548
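/*
 * Discard whatever is sitting in an endpoint FIFO. TX interrupts for
 * the endpoint are masked around the flush, and the FLUSHFIFO write is
 * issued twice to cover double-buffered FIFOs.
 */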
1549static void musb_gadget_fifo_flush(struct usb_ep *ep)
1550{
1551 struct musb_ep *musb_ep = to_musb_ep(ep);
1552 struct musb *musb = musb_ep->musb;
1553 u8 epnum = musb_ep->current_epnum;
1554 void __iomem *epio = musb->endpoints[epnum].regs;
1555 void __iomem *mbase;
1556 unsigned long flags;
1557 u16 csr, int_txe;
1558
1559 mbase = musb->mregs;
1560
1561 spin_lock_irqsave(&musb->lock, flags);
1562 musb_ep_select(mbase, (u8) epnum);
1563
1564 /* disable interrupts */
1565 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1566 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1567
1568 if (musb_ep->is_in) {
1569 csr = musb_readw(epio, MUSB_TXCSR);
1570 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1571 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
Yauheni Kaliuta4858f062011-06-08 17:12:02 +03001572 /*
1573 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1574 * interrupt the current FIFO loading, but not flush the
1575 * already-loaded packets.
1576 */
1577 csr &= ~MUSB_TXCSR_TXPKTRDY;
Felipe Balbi550a7372008-07-24 12:27:36 +03001578 musb_writew(epio, MUSB_TXCSR, csr);
1579 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1580 musb_writew(epio, MUSB_TXCSR, csr);
1581 }
1582 } else {
1583 csr = musb_readw(epio, MUSB_RXCSR);
1584 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1585 musb_writew(epio, MUSB_RXCSR, csr);
1586 musb_writew(epio, MUSB_RXCSR, csr);
1587 }
1588
1589 /* re-enable interrupt */
1590 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1591 spin_unlock_irqrestore(&musb->lock, flags);
1592}
1593
1594static const struct usb_ep_ops musb_ep_ops = {
1595 .enable = musb_gadget_enable,
1596 .disable = musb_gadget_disable,
1597 .alloc_request = musb_alloc_request,
1598 .free_request = musb_free_request,
1599 .queue = musb_gadget_queue,
1600 .dequeue = musb_gadget_dequeue,
1601 .set_halt = musb_gadget_set_halt,
Sergei Shtylyov47e97602009-11-18 22:51:51 +03001602 .set_wedge = musb_gadget_set_wedge,
Felipe Balbi550a7372008-07-24 12:27:36 +03001603 .fifo_status = musb_gadget_fifo_status,
1604 .fifo_flush = musb_gadget_fifo_flush
1605};
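
/*
 * These per-endpoint operations are invoked through the usb_ep_*()
 * wrappers in <linux/usb/gadget.h>. A hedged sketch of the typical
 * request lifecycle as a function driver sees it (hypothetical buffer
 * and callback names, not code from this file):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = my_buf;
 *	req->length = my_len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * which ends up in musb_alloc_request() and musb_gadget_queue() above.
 */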
1606
1607/* ----------------------------------------------------------------------- */
1608
1609static int musb_gadget_get_frame(struct usb_gadget *gadget)
1610{
1611 struct musb *musb = gadget_to_musb(gadget);
1612
1613 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1614}
1615
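/*
 * Remote wakeup. From B_PERIPHERAL this drives RESUME signaling on the
 * bus for about 2 ms (provided the host enabled remote wakeup and the
 * link is suspended); from B_IDLE it starts SRP instead.
 */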
1616static int musb_gadget_wakeup(struct usb_gadget *gadget)
1617{
1618 struct musb *musb = gadget_to_musb(gadget);
1619 void __iomem *mregs = musb->mregs;
1620 unsigned long flags;
1621 int status = -EINVAL;
1622 u8 power, devctl;
1623 int retries;
1624
1625 spin_lock_irqsave(&musb->lock, flags);
1626
David Brownell84e250f2009-03-31 12:30:04 -07001627 switch (musb->xceiv->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03001628 case OTG_STATE_B_PERIPHERAL:
1629 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1630 * that's part of the standard usb 1.1 state machine, and
1631 * doesn't affect OTG transitions.
1632 */
1633 if (musb->may_wakeup && musb->is_suspended)
1634 break;
1635 goto done;
1636 case OTG_STATE_B_IDLE:
1637 /* Start SRP ... OTG not required. */
1638 devctl = musb_readb(mregs, MUSB_DEVCTL);
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001639 dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
Felipe Balbi550a7372008-07-24 12:27:36 +03001640 devctl |= MUSB_DEVCTL_SESSION;
1641 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1642 devctl = musb_readb(mregs, MUSB_DEVCTL);
1643 retries = 100;
1644 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1645 devctl = musb_readb(mregs, MUSB_DEVCTL);
1646 if (retries-- < 1)
1647 break;
1648 }
1649 retries = 10000;
1650 while (devctl & MUSB_DEVCTL_SESSION) {
1651 devctl = musb_readb(mregs, MUSB_DEVCTL);
1652 if (retries-- < 1)
1653 break;
1654 }
1655
Hema HK86205432011-03-22 16:54:22 +05301656 spin_unlock_irqrestore(&musb->lock, flags);
Heikki Krogerus6e13c652012-02-13 13:24:20 +02001657 otg_start_srp(musb->xceiv->otg);
Hema HK86205432011-03-22 16:54:22 +05301658 spin_lock_irqsave(&musb->lock, flags);
1659
Felipe Balbi550a7372008-07-24 12:27:36 +03001660 /* Block idling for at least 1s */
1661 musb_platform_try_idle(musb,
1662 jiffies + msecs_to_jiffies(1 * HZ));
1663
1664 status = 0;
1665 goto done;
1666 default:
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001667 dev_dbg(musb->controller, "Unhandled wake: %s\n",
Anatolij Gustschin3df00452011-05-05 12:11:21 +02001668 otg_state_string(musb->xceiv->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03001669 goto done;
1670 }
1671
1672 status = 0;
1673
1674 power = musb_readb(mregs, MUSB_POWER);
1675 power |= MUSB_POWER_RESUME;
1676 musb_writeb(mregs, MUSB_POWER, power);
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001677 dev_dbg(musb->controller, "issue wakeup\n");
Felipe Balbi550a7372008-07-24 12:27:36 +03001678
1679 /* FIXME do this next chunk in a timer callback, avoiding the mdelay */
1680 mdelay(2);
1681
1682 power = musb_readb(mregs, MUSB_POWER);
1683 power &= ~MUSB_POWER_RESUME;
1684 musb_writeb(mregs, MUSB_POWER, power);
1685done:
1686 spin_unlock_irqrestore(&musb->lock, flags);
1687 return status;
1688}
1689
1690static int
1691musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1692{
1693 struct musb *musb = gadget_to_musb(gadget);
1694
1695 musb->is_self_powered = !!is_selfpowered;
1696 return 0;
1697}
1698
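/*
 * Control the D+ pullup via the SOFTCONN bit; this is what makes the
 * peripheral visible (or invisible) to the host. Called with musb->lock
 * held.
 */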
1699static void musb_pullup(struct musb *musb, int is_on)
1700{
1701 u8 power;
1702
1703 power = musb_readb(musb->mregs, MUSB_POWER);
1704 if (is_on)
1705 power |= MUSB_POWER_SOFTCONN;
1706 else
1707 power &= ~MUSB_POWER_SOFTCONN;
1708
1709 /* FIXME if on, HdrcStart; if off, HdrcStop */
1710
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001711 dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1712 is_on ? "on" : "off");
Felipe Balbi550a7372008-07-24 12:27:36 +03001713 musb_writeb(musb->mregs, MUSB_POWER, power);
1714}
1715
1716#if 0
1717static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1718{
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001719 dev_dbg(musb->controller, "<= %s =>\n", __func__);
Felipe Balbi550a7372008-07-24 12:27:36 +03001720
1721 /*
1722 * FIXME iff driver's softconnect flag is set (as it is during probe,
1723 * though that can clear it), just musb_pullup().
1724 */
1725
1726 return -EINVAL;
1727}
1728#endif
1729
1730static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1731{
1732 struct musb *musb = gadget_to_musb(gadget);
1733
David Brownell84e250f2009-03-31 12:30:04 -07001734 if (!musb->xceiv->set_power)
Felipe Balbi550a7372008-07-24 12:27:36 +03001735 return -EOPNOTSUPP;
Heikki Krogerusb96d3b02012-02-13 13:24:18 +02001736 return usb_phy_set_power(musb->xceiv, mA);
Felipe Balbi550a7372008-07-24 12:27:36 +03001737}
1738
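/*
 * Gadget-level soft connect/disconnect, reached through
 * usb_gadget_connect() and usb_gadget_disconnect(). The pm_runtime pair
 * keeps the controller powered while MUSB_POWER is written; the
 * softconnect flag suppresses redundant register writes.
 */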
1739static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1740{
1741 struct musb *musb = gadget_to_musb(gadget);
1742 unsigned long flags;
1743
1744 is_on = !!is_on;
1745
John Stultz93e098a2011-07-20 17:09:34 -07001746 pm_runtime_get_sync(musb->controller);
1747
Felipe Balbi550a7372008-07-24 12:27:36 +03001748 /* NOTE: this assumes we are sensing vbus; we'd rather
1749 * not pullup unless the B-session is active.
1750 */
1751 spin_lock_irqsave(&musb->lock, flags);
1752 if (is_on != musb->softconnect) {
1753 musb->softconnect = is_on;
1754 musb_pullup(musb, is_on);
1755 }
1756 spin_unlock_irqrestore(&musb->lock, flags);
John Stultz93e098a2011-07-20 17:09:34 -07001757
1758 pm_runtime_put(musb->controller);
1759
Felipe Balbi550a7372008-07-24 12:27:36 +03001760 return 0;
1761}
1762
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001763static int musb_gadget_start(struct usb_gadget *g,
1764 struct usb_gadget_driver *driver);
1765static int musb_gadget_stop(struct usb_gadget *g,
1766 struct usb_gadget_driver *driver);
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001767
Felipe Balbi550a7372008-07-24 12:27:36 +03001768static const struct usb_gadget_ops musb_gadget_operations = {
1769 .get_frame = musb_gadget_get_frame,
1770 .wakeup = musb_gadget_wakeup,
1771 .set_selfpowered = musb_gadget_set_self_powered,
1772 /* .vbus_session = musb_gadget_vbus_session, */
1773 .vbus_draw = musb_gadget_vbus_draw,
1774 .pullup = musb_gadget_pullup,
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001775 .udc_start = musb_gadget_start,
1776 .udc_stop = musb_gadget_stop,
Felipe Balbi550a7372008-07-24 12:27:36 +03001777};
1778
1779/* ----------------------------------------------------------------------- */
1780
1781/* Registration */
1782
1783/* Only this registration code "knows" the rule (from USB standards)
1784 * about there being only one external upstream port. It assumes
1785 * all peripheral ports are external...
1786 */
Felipe Balbi550a7372008-07-24 12:27:36 +03001787
1788static void musb_gadget_release(struct device *dev)
1789{
1790 /* kref_put(WHAT) */
1791 dev_dbg(dev, "%s\n", __func__);
1792}
1793
1794
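/*
 * Set up one peripheral-side endpoint: name it ("ep0", "ep1in",
 * "ep2out", ...), publish the hardware FIFO size as the usb_ep
 * maxpacket, and link it into the gadget's ep_list; ep0 is instead
 * exposed as musb->g.ep0 with the ep0-specific ops.
 */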
Felipe Balbie9e8c852012-01-26 12:40:23 +02001795static void __devinit
Felipe Balbi550a7372008-07-24 12:27:36 +03001796init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1797{
1798 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1799
1800 memset(ep, 0, sizeof *ep);
1801
1802 ep->current_epnum = epnum;
1803 ep->musb = musb;
1804 ep->hw_ep = hw_ep;
1805 ep->is_in = is_in;
1806
1807 INIT_LIST_HEAD(&ep->req_list);
1808
1809 sprintf(ep->name, "ep%d%s", epnum,
1810 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1811 is_in ? "in" : "out"));
1812 ep->end_point.name = ep->name;
1813 INIT_LIST_HEAD(&ep->end_point.ep_list);
1814 if (!epnum) {
1815 ep->end_point.maxpacket = 64;
1816 ep->end_point.ops = &musb_g_ep0_ops;
1817 musb->g.ep0 = &ep->end_point;
1818 } else {
1819 if (is_in)
1820 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1821 else
1822 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1823 ep->end_point.ops = &musb_ep_ops;
1824 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1825 }
1826}
1827
1828/*
1829 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1830 * to the rest of the driver state.
1831 */
Felipe Balbie9e8c852012-01-26 12:40:23 +02001832static inline void __devinit musb_g_init_endpoints(struct musb *musb)
Felipe Balbi550a7372008-07-24 12:27:36 +03001833{
1834 u8 epnum;
1835 struct musb_hw_ep *hw_ep;
1836 unsigned count = 0;
1837
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001838 /* initialize endpoint list just once */
Felipe Balbi550a7372008-07-24 12:27:36 +03001839 INIT_LIST_HEAD(&(musb->g.ep_list));
1840
1841 for (epnum = 0, hw_ep = musb->endpoints;
1842 epnum < musb->nr_endpoints;
1843 epnum++, hw_ep++) {
1844 if (hw_ep->is_shared_fifo /* || !epnum */) {
1845 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1846 count++;
1847 } else {
1848 if (hw_ep->max_packet_sz_tx) {
1849 init_peripheral_ep(musb, &hw_ep->ep_in,
1850 epnum, 1);
1851 count++;
1852 }
1853 if (hw_ep->max_packet_sz_rx) {
1854 init_peripheral_ep(musb, &hw_ep->ep_out,
1855 epnum, 0);
1856 count++;
1857 }
1858 }
1859 }
1860}
1861
1862/* called once during driver setup to initialize and link into
1863 * the driver model; memory is zeroed.
1864 */
Felipe Balbie9e8c852012-01-26 12:40:23 +02001865int __devinit musb_gadget_setup(struct musb *musb)
Felipe Balbi550a7372008-07-24 12:27:36 +03001866{
1867 int status;
1868
1869 /* REVISIT minor race: if (erroneously) setting up two
1870 * musb peripherals at the same time, only the bus lock
1871 * is probably held.
1872 */
Felipe Balbi550a7372008-07-24 12:27:36 +03001873
1874 musb->g.ops = &musb_gadget_operations;
Michal Nazarewiczd327ab52011-11-19 18:27:37 +01001875 musb->g.max_speed = USB_SPEED_HIGH;
Felipe Balbi550a7372008-07-24 12:27:36 +03001876 musb->g.speed = USB_SPEED_UNKNOWN;
1877
1878 /* this "gadget" abstracts/virtualizes the controller */
Kay Sievers427c4f32008-11-07 01:52:53 +01001879 dev_set_name(&musb->g.dev, "gadget");
Felipe Balbi550a7372008-07-24 12:27:36 +03001880 musb->g.dev.parent = musb->controller;
1881 musb->g.dev.dma_mask = musb->controller->dma_mask;
1882 musb->g.dev.release = musb_gadget_release;
1883 musb->g.name = musb_driver_name;
1884
Felipe Balbi032ec492011-11-24 15:46:26 +02001885 musb->g.is_otg = 1;
Felipe Balbi550a7372008-07-24 12:27:36 +03001886
1887 musb_g_init_endpoints(musb);
1888
1889 musb->is_active = 0;
1890 musb_platform_try_idle(musb, 0);
1891
1892 status = device_register(&musb->g.dev);
Rahul Ruikare2c34042010-10-02 01:35:48 -05001893 if (status != 0) {
1894 put_device(&musb->g.dev);
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001895 return status;
Rahul Ruikare2c34042010-10-02 01:35:48 -05001896 }
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001897 status = usb_add_gadget_udc(musb->controller, &musb->g);
1898 if (status)
1899 goto err;
1900
1901 return 0;
1902err:
Sebastian Andrzej Siewior6193d692011-08-10 11:01:57 +02001903 musb->g.dev.parent = NULL;
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001904 device_unregister(&musb->g.dev);
Felipe Balbi550a7372008-07-24 12:27:36 +03001905 return status;
1906}
1907
1908void musb_gadget_cleanup(struct musb *musb)
1909{
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03001910 usb_del_gadget_udc(&musb->g);
Sebastian Andrzej Siewior6193d692011-08-10 11:01:57 +02001911 if (musb->g.dev.parent)
1912 device_unregister(&musb->g.dev);
Felipe Balbi550a7372008-07-24 12:27:36 +03001913}
1914
1915/*
1916 * Register the gadget driver. Used by gadget drivers when
1917 * registering themselves with the controller.
1918 *
1919 * -EINVAL something went wrong (not driver)
1920 * -EBUSY another gadget is already using the controller
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001921 * -ENOMEM no memory to perform the operation
Felipe Balbi550a7372008-07-24 12:27:36 +03001922 *
1923 * @param driver the gadget driver
1924 * @return <0 if error, 0 if everything is fine
1925 */
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001926static int musb_gadget_start(struct usb_gadget *g,
1927 struct usb_gadget_driver *driver)
Felipe Balbi550a7372008-07-24 12:27:36 +03001928{
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001929 struct musb *musb = gadget_to_musb(g);
Heikki Krogerusd445b6d2012-02-13 13:24:15 +02001930 struct usb_otg *otg = musb->xceiv->otg;
Felipe Balbi032ec492011-11-24 15:46:26 +02001931 struct usb_hcd *hcd = musb_to_hcd(musb);
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001932 unsigned long flags;
Felipe Balbi032ec492011-11-24 15:46:26 +02001933 int retval = 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03001934
Felipe Balbi032ec492011-11-24 15:46:26 +02001935 if (driver->max_speed < USB_SPEED_HIGH) {
1936 retval = -EINVAL;
1937 goto err;
1938 }
Felipe Balbi550a7372008-07-24 12:27:36 +03001939
Hema HK7acc6192011-02-28 14:19:34 +05301940 pm_runtime_get_sync(musb->controller);
1941
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03001942 dev_dbg(musb->controller, "registering driver %s\n", driver->function);
Felipe Balbi550a7372008-07-24 12:27:36 +03001943
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001944 musb->softconnect = 0;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001945 musb->gadget_driver = driver;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001946
1947 spin_lock_irqsave(&musb->lock, flags);
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02001948 musb->is_active = 1;
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001949
Heikki Krogerus6e13c652012-02-13 13:24:20 +02001950 otg_set_peripheral(otg, &musb->g);
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001951 musb->xceiv->state = OTG_STATE_B_IDLE;
Felipe Balbi550a7372008-07-24 12:27:36 +03001952 spin_unlock_irqrestore(&musb->lock, flags);
1953
Felipe Balbi032ec492011-11-24 15:46:26 +02001954 /* REVISIT: funcall to other code, which also
1955 * handles power budgeting ... this way also
1956 * ensures HdrcStart is indirectly called.
1957 */
1958 retval = usb_add_hcd(hcd, 0, 0);
1959 if (retval < 0) {
1960 dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1961 goto err;
Felipe Balbi550a7372008-07-24 12:27:36 +03001962 }
Felipe Balbi032ec492011-11-24 15:46:26 +02001963
1964 if ((musb->xceiv->last_event == USB_EVENT_ID)
1965 && otg->set_vbus)
1966 otg_set_vbus(otg, 1);
1967
1968 hcd->self.uses_pio_for_control = 1;
1969
Jarkko Nikulacdefce12011-04-29 16:17:35 +03001970 if (musb->xceiv->last_event == USB_EVENT_NONE)
1971 pm_runtime_put(musb->controller);
Felipe Balbi550a7372008-07-24 12:27:36 +03001972
Felipe Balbi63eed2b2011-01-17 10:34:38 +02001973 return 0;
1974
Felipe Balbi032ec492011-11-24 15:46:26 +02001975err:
Felipe Balbi550a7372008-07-24 12:27:36 +03001976 return retval;
1977}
Felipe Balbi550a7372008-07-24 12:27:36 +03001978
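/*
 * Quiesce the hardware and complete every outstanding request with
 * -ESHUTDOWN so the (possibly departing) gadget driver sees a clean
 * disconnect.
 */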
1979static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1980{
1981 int i;
1982 struct musb_hw_ep *hw_ep;
1983
1984 /* don't disconnect if it's not connected */
1985 if (musb->g.speed == USB_SPEED_UNKNOWN)
1986 driver = NULL;
1987 else
1988 musb->g.speed = USB_SPEED_UNKNOWN;
1989
1990 /* deactivate the hardware */
1991 if (musb->softconnect) {
1992 musb->softconnect = 0;
1993 musb_pullup(musb, 0);
1994 }
1995 musb_stop(musb);
1996
1997 /* killing any outstanding requests will quiesce the driver;
1998 * then report disconnect
1999 */
2000 if (driver) {
2001 for (i = 0, hw_ep = musb->endpoints;
2002 i < musb->nr_endpoints;
2003 i++, hw_ep++) {
2004 musb_ep_select(musb->mregs, i);
2005 if (hw_ep->is_shared_fifo /* || !epnum */) {
2006 nuke(&hw_ep->ep_in, -ESHUTDOWN);
2007 } else {
2008 if (hw_ep->max_packet_sz_tx)
2009 nuke(&hw_ep->ep_in, -ESHUTDOWN);
2010 if (hw_ep->max_packet_sz_rx)
2011 nuke(&hw_ep->ep_out, -ESHUTDOWN);
2012 }
2013 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002014 }
2015}
2016
2017/*
2018 * Unregister the gadget driver. Used by gadget drivers when
2019 * unregistering themselves from the controller.
2020 *
2021 * @param driver the gadget driver to unregister
2022 */
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02002023static int musb_gadget_stop(struct usb_gadget *g,
2024 struct usb_gadget_driver *driver)
Felipe Balbi550a7372008-07-24 12:27:36 +03002025{
Sebastian Andrzej Siewiore71eb392011-06-23 14:26:16 +02002026 struct musb *musb = gadget_to_musb(g);
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002027 unsigned long flags;
Felipe Balbi550a7372008-07-24 12:27:36 +03002028
Hema HK7acc6192011-02-28 14:19:34 +05302029 if (musb->xceiv->last_event == USB_EVENT_NONE)
2030 pm_runtime_get_sync(musb->controller);
2031
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002032 /*
2033 * REVISIT always use otg_set_peripheral() here too;
Felipe Balbi550a7372008-07-24 12:27:36 +03002034 * this needs to shut down the OTG engine.
2035 */
2036
2037 spin_lock_irqsave(&musb->lock, flags);
2038
Felipe Balbi550a7372008-07-24 12:27:36 +03002039 musb_hnp_stop(musb);
Felipe Balbi550a7372008-07-24 12:27:36 +03002040
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002041 (void) musb_gadget_vbus_draw(&musb->g, 0);
Felipe Balbi550a7372008-07-24 12:27:36 +03002042
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002043 musb->xceiv->state = OTG_STATE_UNDEFINED;
2044 stop_activity(musb, driver);
Heikki Krogerus6e13c652012-02-13 13:24:20 +02002045 otg_set_peripheral(musb->xceiv->otg, NULL);
Felipe Balbi550a7372008-07-24 12:27:36 +03002046
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002047 dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
Felipe Balbi550a7372008-07-24 12:27:36 +03002048
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002049 musb->is_active = 0;
2050 musb_platform_try_idle(musb, 0);
Felipe Balbi550a7372008-07-24 12:27:36 +03002051 spin_unlock_irqrestore(&musb->lock, flags);
2052
Felipe Balbi032ec492011-11-24 15:46:26 +02002053 usb_remove_hcd(musb_to_hcd(musb));
2054 /*
2055 * FIXME we need to be able to register another
2056 * gadget driver here and have everything work;
2057 * that currently misbehaves.
2058 */
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002059
Hema HK7acc6192011-02-28 14:19:34 +05302060 pm_runtime_put(musb->controller);
2061
Felipe Balbi63eed2b2011-01-17 10:34:38 +02002062 return 0;
Felipe Balbi550a7372008-07-24 12:27:36 +03002063}
Felipe Balbi550a7372008-07-24 12:27:36 +03002064
2065/* ----------------------------------------------------------------------- */
2066
2067/* lifecycle operations called through plat_uds.c */
2068
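/* called on resume signaling from the host; forwards to the gadget
 * driver's resume() callback with musb->lock dropped around the call
 */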
2069void musb_g_resume(struct musb *musb)
2070{
2071 musb->is_suspended = 0;
David Brownell84e250f2009-03-31 12:30:04 -07002072 switch (musb->xceiv->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002073 case OTG_STATE_B_IDLE:
2074 break;
2075 case OTG_STATE_B_WAIT_ACON:
2076 case OTG_STATE_B_PERIPHERAL:
2077 musb->is_active = 1;
2078 if (musb->gadget_driver && musb->gadget_driver->resume) {
2079 spin_unlock(&musb->lock);
2080 musb->gadget_driver->resume(&musb->g);
2081 spin_lock(&musb->lock);
2082 }
2083 break;
2084 default:
2085 WARNING("unhandled RESUME transition (%s)\n",
Anatolij Gustschin3df00452011-05-05 12:11:21 +02002086 otg_state_string(musb->xceiv->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03002087 }
2088}
2089
2090/* called when SOF packets stop for 3+ msec */
2091void musb_g_suspend(struct musb *musb)
2092{
2093 u8 devctl;
2094
2095 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002096 dev_dbg(musb->controller, "devctl %02x\n", devctl);
Felipe Balbi550a7372008-07-24 12:27:36 +03002097
David Brownell84e250f2009-03-31 12:30:04 -07002098 switch (musb->xceiv->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002099 case OTG_STATE_B_IDLE:
2100 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
David Brownell84e250f2009-03-31 12:30:04 -07002101 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002102 break;
2103 case OTG_STATE_B_PERIPHERAL:
2104 musb->is_suspended = 1;
2105 if (musb->gadget_driver && musb->gadget_driver->suspend) {
2106 spin_unlock(&musb->lock);
2107 musb->gadget_driver->suspend(&musb->g);
2108 spin_lock(&musb->lock);
2109 }
2110 break;
2111 default:
2112 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2113 * A_PERIPHERAL may need care too
2114 */
2115 WARNING("unhandled SUSPEND transition (%s)\n",
Anatolij Gustschin3df00452011-05-05 12:11:21 +02002116 otg_state_string(musb->xceiv->state));
Felipe Balbi550a7372008-07-24 12:27:36 +03002117 }
2118}
2119
2120/* Called during SRP */
2121void musb_g_wakeup(struct musb *musb)
2122{
2123 musb_gadget_wakeup(&musb->g);
2124}
2125
2126/* called when VBUS drops below session threshold, and in other cases */
2127void musb_g_disconnect(struct musb *musb)
2128{
2129 void __iomem *mregs = musb->mregs;
2130 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
2131
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002132 dev_dbg(musb->controller, "devctl %02x\n", devctl);
Felipe Balbi550a7372008-07-24 12:27:36 +03002133
2134 /* clear HR */
2135 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2136
2137 /* don't draw vbus until new b-default session */
2138 (void) musb_gadget_vbus_draw(&musb->g, 0);
2139
2140 musb->g.speed = USB_SPEED_UNKNOWN;
2141 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2142 spin_unlock(&musb->lock);
2143 musb->gadget_driver->disconnect(&musb->g);
2144 spin_lock(&musb->lock);
2145 }
2146
David Brownell84e250f2009-03-31 12:30:04 -07002147 switch (musb->xceiv->state) {
Felipe Balbi550a7372008-07-24 12:27:36 +03002148 default:
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002149 dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
Anatolij Gustschin3df00452011-05-05 12:11:21 +02002150 otg_state_string(musb->xceiv->state));
David Brownell84e250f2009-03-31 12:30:04 -07002151 musb->xceiv->state = OTG_STATE_A_IDLE;
David Brownellab983f2a2009-03-31 12:35:09 -07002152 MUSB_HST_MODE(musb);
Felipe Balbi550a7372008-07-24 12:27:36 +03002153 break;
2154 case OTG_STATE_A_PERIPHERAL:
David Brownell1de00da2009-04-02 10:16:11 -07002155 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
David Brownellab983f2a2009-03-31 12:35:09 -07002156 MUSB_HST_MODE(musb);
Felipe Balbi550a7372008-07-24 12:27:36 +03002157 break;
2158 case OTG_STATE_B_WAIT_ACON:
2159 case OTG_STATE_B_HOST:
Felipe Balbi550a7372008-07-24 12:27:36 +03002160 case OTG_STATE_B_PERIPHERAL:
2161 case OTG_STATE_B_IDLE:
David Brownell84e250f2009-03-31 12:30:04 -07002162 musb->xceiv->state = OTG_STATE_B_IDLE;
Felipe Balbi550a7372008-07-24 12:27:36 +03002163 break;
2164 case OTG_STATE_B_SRP_INIT:
2165 break;
2166 }
2167
2168 musb->is_active = 0;
2169}
2170
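/* called from the IRQ path on USB reset: report any pending disconnect,
 * latch the negotiated speed from MUSB_POWER, and return the device to
 * its default, unaddressed state
 */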
2171void musb_g_reset(struct musb *musb)
2172__releases(musb->lock)
2173__acquires(musb->lock)
2174{
2175 void __iomem *mbase = musb->mregs;
2176 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2177 u8 power;
2178
Felipe Balbi5c8a86e2011-05-11 12:44:08 +03002179 dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
Felipe Balbi550a7372008-07-24 12:27:36 +03002180 (devctl & MUSB_DEVCTL_BDEVICE)
2181 ? "B-Device" : "A-Device",
2182 musb_readb(mbase, MUSB_FADDR),
2183 musb->gadget_driver
2184 ? musb->gadget_driver->driver.name
2185 : NULL
2186 );
2187
2188 /* report disconnect, if we didn't already (flushing EP state) */
2189 if (musb->g.speed != USB_SPEED_UNKNOWN)
2190 musb_g_disconnect(musb);
2191
2192 /* clear HR */
2193 else if (devctl & MUSB_DEVCTL_HR)
2194 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2195
2196
2197 /* what speed did we negotiate? */
2198 power = musb_readb(mbase, MUSB_POWER);
2199 musb->g.speed = (power & MUSB_POWER_HSMODE)
2200 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2201
2202 /* start in USB_STATE_DEFAULT */
2203 musb->is_active = 1;
2204 musb->is_suspended = 0;
2205 MUSB_DEV_MODE(musb);
2206 musb->address = 0;
2207 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2208
2209 musb->may_wakeup = 0;
2210 musb->g.b_hnp_enable = 0;
2211 musb->g.a_alt_hnp_support = 0;
2212 musb->g.a_hnp_support = 0;
2213
2214 /* Normal reset, as B-Device;
2215 * or else after HNP, as A-Device
2216 */
2217 if (devctl & MUSB_DEVCTL_BDEVICE) {
David Brownell84e250f2009-03-31 12:30:04 -07002218 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002219 musb->g.is_a_peripheral = 0;
Felipe Balbi032ec492011-11-24 15:46:26 +02002220 } else {
David Brownell84e250f2009-03-31 12:30:04 -07002221 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
Felipe Balbi550a7372008-07-24 12:27:36 +03002222 musb->g.is_a_peripheral = 1;
Felipe Balbi032ec492011-11-24 15:46:26 +02002223 }
Felipe Balbi550a7372008-07-24 12:27:36 +03002224
2225 /* start with default limits on VBUS power draw */
Felipe Balbi032ec492011-11-24 15:46:26 +02002226 (void) musb_gadget_vbus_draw(&musb->g, 8);
Felipe Balbi550a7372008-07-24 12:27:36 +03002227}