blob: 76d0a87f5b15224bdcac605843a5d86b35e43ca3 [file] [log] [blame]
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 * All rights reserved.
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the above-listed copyright holders may not be used
20 * to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") version 2, as published by the Free
25 * Software Foundation.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <linux/kernel.h>
41#include <linux/delay.h>
42#include <linux/slab.h>
43#include <linux/spinlock.h>
44#include <linux/platform_device.h>
45#include <linux/pm_runtime.h>
46#include <linux/interrupt.h>
47#include <linux/io.h>
48#include <linux/list.h>
49#include <linux/dma-mapping.h>
50
51#include <linux/usb/ch9.h>
52#include <linux/usb/gadget.h>
53
54#include "core.h"
55#include "gadget.h"
56#include "io.h"
57
58#define DMA_ADDR_INVALID (~(dma_addr_t)0)
59
60void dwc3_map_buffer_to_dma(struct dwc3_request *req)
61{
62 struct dwc3 *dwc = req->dep->dwc;
63
Sebastian Andrzej Siewior4ae8e1c2011-08-31 17:12:02 +020064 if (req->request.length == 0) {
65 /* req->request.dma = dwc->setup_buf_addr; */
66 return;
67 }
68
Felipe Balbi4dc64e52011-08-19 18:10:58 +030069 if (req->request.dma == DMA_ADDR_INVALID) {
70 req->request.dma = dma_map_single(dwc->dev, req->request.buf,
71 req->request.length, req->direction
72 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
73 req->mapped = true;
Felipe Balbi4dc64e52011-08-19 18:10:58 +030074 }
75}
76
77void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
78{
79 struct dwc3 *dwc = req->dep->dwc;
80
Sebastian Andrzej Siewior4ae8e1c2011-08-31 17:12:02 +020081 if (req->request.length == 0) {
82 req->request.dma = DMA_ADDR_INVALID;
83 return;
84 }
85
Felipe Balbi4dc64e52011-08-19 18:10:58 +030086 if (req->mapped) {
87 dma_unmap_single(dwc->dev, req->request.dma,
88 req->request.length, req->direction
89 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
90 req->mapped = 0;
Felipe Balbi162e1282011-08-27 15:10:09 +030091 req->request.dma = DMA_ADDR_INVALID;
Felipe Balbi4dc64e52011-08-19 18:10:58 +030092 }
93}
94
/*
 * dwc3_gadget_giveback - complete a request back to the gadget driver
 * @dep: endpoint the request was queued on
 * @req: request being completed
 * @status: completion code (0 or negative errno)
 *
 * Caller must hold dwc->lock; the lock is dropped around the gadget
 * driver's ->complete() callback because that callback may queue new
 * requests, which takes the lock again.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		dep->busy_slot++;
		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);

	/* don't overwrite a status already set, e.g. by a dequeue */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/* call the completion handler unlocked; it may re-enter the driver */
	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
126
127static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
128{
129 switch (cmd) {
130 case DWC3_DEPCMD_DEPSTARTCFG:
131 return "Start New Configuration";
132 case DWC3_DEPCMD_ENDTRANSFER:
133 return "End Transfer";
134 case DWC3_DEPCMD_UPDATETRANSFER:
135 return "Update Transfer";
136 case DWC3_DEPCMD_STARTTRANSFER:
137 return "Start Transfer";
138 case DWC3_DEPCMD_CLEARSTALL:
139 return "Clear Stall";
140 case DWC3_DEPCMD_SETSTALL:
141 return "Set Stall";
142 case DWC3_DEPCMD_GETSEQNUMBER:
143 return "Get Data Sequence Number";
144 case DWC3_DEPCMD_SETTRANSFRESOURCE:
145 return "Set Endpoint Transfer Resource";
146 case DWC3_DEPCMD_SETEPCONFIG:
147 return "Set Endpoint Configuration";
148 default:
149 return "UNKNOWN command";
150 }
151}
152
153int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
154 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
155{
156 struct dwc3_ep *dep = dwc->eps[ep];
Sebastian Andrzej Siewior6062cac2011-08-29 16:46:38 +0200157 u32 timeout = 500;
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300158 u32 reg;
159
160 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
161 dep->name,
162 dwc3_gadget_ep_cmd_string(cmd), params->param0.raw,
163 params->param1.raw, params->param2.raw);
164
165 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0.raw);
166 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1.raw);
167 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2.raw);
168
169 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
170 do {
171 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
172 if (!(reg & DWC3_DEPCMD_CMDACT)) {
Felipe Balbic7dbe4f2011-08-27 20:29:58 +0300173 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
174 DWC3_DEPCMD_STATUS(reg));
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300175 return 0;
176 }
177
178 /*
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300179 * We can't sleep here, because it is also called from
180 * interrupt context.
181 */
182 timeout--;
183 if (!timeout)
184 return -ETIMEDOUT;
185
Sebastian Andrzej Siewior6062cac2011-08-29 16:46:38 +0200186 udelay(1);
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300187 } while (1);
188}
189
190static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
191 struct dwc3_trb_hw *trb)
192{
193 u32 offset = trb - dep->trb_pool;
194
195 return dep->trb_pool_dma + offset;
196}
197
198static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
199{
200 struct dwc3 *dwc = dep->dwc;
201
202 if (dep->trb_pool)
203 return 0;
204
205 if (dep->number == 0 || dep->number == 1)
206 return 0;
207
208 dep->trb_pool = dma_alloc_coherent(dwc->dev,
209 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
210 &dep->trb_pool_dma, GFP_KERNEL);
211 if (!dep->trb_pool) {
212 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
213 dep->name);
214 return -ENOMEM;
215 }
216
217 return 0;
218}
219
220static void dwc3_free_trb_pool(struct dwc3_ep *dep)
221{
222 struct dwc3 *dwc = dep->dwc;
223
224 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
225 dep->trb_pool, dep->trb_pool_dma);
226
227 dep->trb_pool = NULL;
228 dep->trb_pool_dma = 0;
229}
230
231static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
232{
233 struct dwc3_gadget_ep_cmd_params params;
234 u32 cmd;
235
236 memset(&params, 0x00, sizeof(params));
237
238 if (dep->number != 1) {
239 cmd = DWC3_DEPCMD_DEPSTARTCFG;
240 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
241 if (dep->number > 1)
242 cmd |= DWC3_DEPCMD_PARAM(2);
243
244 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
245 }
246
247 return 0;
248}
249
/*
 * dwc3_gadget_set_ep_config - program a SETEPCONFIG command for @dep
 * @dwc: controller context
 * @dep: endpoint being configured
 * @desc: USB endpoint descriptor supplying type/size/interval
 *
 * Builds the DEPCFG parameters from the descriptor and sends the
 * command. Returns whatever dwc3_send_gadget_ep_cmd() returns.
 */
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0.depcfg.ep_type = usb_endpoint_type(desc);
	params.param0.depcfg.max_packet_size =
		le16_to_cpu(desc->wMaxPacketSize);

	params.param1.depcfg.xfer_complete_enable = true;
	params.param1.depcfg.xfer_not_ready_enable = true;

	/* isoc endpoints also want in-progress events */
	if (usb_endpoint_xfer_isoc(desc))
		params.param1.depcfg.xfer_in_progress_enable = true;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1.depcfg.ep_number = dep->number;

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0.depcfg.fifo_number = dep->number >> 1;

	if (desc->bInterval) {
		params.param1.depcfg.binterval_m1 = desc->bInterval - 1;
		/* cache the interval in (micro)frames: 2^(bInterval-1) */
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}
290
291static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
292{
293 struct dwc3_gadget_ep_cmd_params params;
294
295 memset(&params, 0x00, sizeof(params));
296
297 params.param0.depxfercfg.number_xfer_resources = 1;
298
299 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
300 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
301}
302
/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Issues Start Configuration (first enable only), Set Endpoint
 * Configuration and Set Transfer Resource, then enables the endpoint
 * in DALEPENA. Isochronous endpoints additionally get a Link TRB so
 * the TRB pool behaves as a ring buffer.
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb_hw *trb_st_hw;
		struct dwc3_trb_hw *trb_link_hw;
		struct dwc3_trb trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->desc = desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		/* enable the endpoint in the device active endpoint bitmap */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		/* only isochronous endpoints need the ring's Link TRB */
		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
		trb_link.hwo = true;

		/* last slot of the pool points back to the first TRB */
		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
		dwc3_trb_to_hw(&trb_link, trb_link_hw);
	}

	return 0;
}
362
Sebastian Andrzej Siewiorb55db3b2011-08-29 13:56:37 +0200363static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
364static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300365{
366 struct dwc3_request *req;
367
Sebastian Andrzej Siewiorb55db3b2011-08-29 13:56:37 +0200368 if (!list_empty(&dep->req_queued))
369 dwc3_stop_active_transfer(dwc, dep->number);
370
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300371 while (!list_empty(&dep->request_list)) {
372 req = next_request(&dep->request_list);
373
Sebastian Andrzej Siewiorb55db3b2011-08-29 13:56:37 +0200374 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300375 }
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300376}
377
/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	/* clear the flag first so giveback sees the endpoint as disabled */
	dep->flags &= ~DWC3_EP_ENABLED;
	dwc3_remove_requests(dwc, dep);

	/* remove the endpoint from the device active endpoint bitmap */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->desc = NULL;
	dep->type = 0;

	return 0;
}
403
404/* -------------------------------------------------------------------------- */
405
/* ep0 is managed by the driver itself; gadget drivers may not enable it. */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
411
/* ep0 is managed by the driver itself; gadget drivers may not disable it. */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
416
417/* -------------------------------------------------------------------------- */
418
419static int dwc3_gadget_ep_enable(struct usb_ep *ep,
420 const struct usb_endpoint_descriptor *desc)
421{
422 struct dwc3_ep *dep;
423 struct dwc3 *dwc;
424 unsigned long flags;
425 int ret;
426
427 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
428 pr_debug("dwc3: invalid parameters\n");
429 return -EINVAL;
430 }
431
432 if (!desc->wMaxPacketSize) {
433 pr_debug("dwc3: missing wMaxPacketSize\n");
434 return -EINVAL;
435 }
436
437 dep = to_dwc3_ep(ep);
438 dwc = dep->dwc;
439
440 switch (usb_endpoint_type(desc)) {
441 case USB_ENDPOINT_XFER_CONTROL:
442 strncat(dep->name, "-control", sizeof(dep->name));
443 break;
444 case USB_ENDPOINT_XFER_ISOC:
445 strncat(dep->name, "-isoc", sizeof(dep->name));
446 break;
447 case USB_ENDPOINT_XFER_BULK:
448 strncat(dep->name, "-bulk", sizeof(dep->name));
449 break;
450 case USB_ENDPOINT_XFER_INT:
451 strncat(dep->name, "-int", sizeof(dep->name));
452 break;
453 default:
454 dev_err(dwc->dev, "invalid endpoint transfer type\n");
455 }
456
457 if (dep->flags & DWC3_EP_ENABLED) {
458 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
459 dep->name);
460 return 0;
461 }
462
463 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
464
465 spin_lock_irqsave(&dwc->lock, flags);
466 ret = __dwc3_gadget_ep_enable(dep, desc);
467 spin_unlock_irqrestore(&dwc->lock, flags);
468
469 return ret;
470}
471
472static int dwc3_gadget_ep_disable(struct usb_ep *ep)
473{
474 struct dwc3_ep *dep;
475 struct dwc3 *dwc;
476 unsigned long flags;
477 int ret;
478
479 if (!ep) {
480 pr_debug("dwc3: invalid parameters\n");
481 return -EINVAL;
482 }
483
484 dep = to_dwc3_ep(ep);
485 dwc = dep->dwc;
486
487 if (!(dep->flags & DWC3_EP_ENABLED)) {
488 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
489 dep->name);
490 return 0;
491 }
492
493 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
494 dep->number >> 1,
495 (dep->number & 1) ? "in" : "out");
496
497 spin_lock_irqsave(&dwc->lock, flags);
498 ret = __dwc3_gadget_ep_disable(dep);
499 spin_unlock_irqrestore(&dwc->lock, flags);
500
501 return ret;
502}
503
504static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
505 gfp_t gfp_flags)
506{
507 struct dwc3_request *req;
508 struct dwc3_ep *dep = to_dwc3_ep(ep);
509 struct dwc3 *dwc = dep->dwc;
510
511 req = kzalloc(sizeof(*req), gfp_flags);
512 if (!req) {
513 dev_err(dwc->dev, "not enough memory\n");
514 return NULL;
515 }
516
517 req->epnum = dep->number;
518 req->dep = dep;
519 req->request.dma = DMA_ADDR_INVALID;
520
521 return &req->request;
522}
523
/* Free a request allocated by dwc3_gadget_ep_alloc_request(). */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	kfree(to_dwc3_request(request));
}
531
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the request list and sets up TRBs for the
 * transfers. It returns once there are no more TRBs available or it
 * runs out of requests. The return value is the first request that was
 * armed (had its HWO bit set), or NULL if nothing could be prepared.
 */
static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
		bool starting)
{
	struct dwc3_request *req, *n, *ret = NULL;
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;
	u32 trbs_left;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
	/*
	 * If busy and free slot are equal then the ring is either full or
	 * empty. If we are starting to process requests then we are empty.
	 * Otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return NULL;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit TRB_NUM/4. We try to avoid to having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return NULL;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned int last_one = 0;
		unsigned int cur_slot;

		trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
		cur_slot = dep->free_slot;
		dep->free_slot++;

		/* Skip the LINK-TRB on ISOC */
		if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			continue;

		/* move the request from request_list over to req_queued */
		dwc3_gadget_move_request_queued(req);
		memset(&trb, 0, sizeof(trb));
		trbs_left--;

		/* Is our TRB pool empty? */
		if (!trbs_left)
			last_one = 1;
		/* Is this the last request? */
		if (list_empty(&dep->request_list))
			last_one = 1;

		/*
		 * FIXME we shouldn't need to set LST bit always but we are
		 * facing some weird problem with the Hardware where it doesn't
		 * complete even though it has been previously started.
		 *
		 * While we're debugging the problem, as a workaround to
		 * multiple TRBs handling, use only one TRB at a time.
		 */
		last_one = 1;

		req->trb = trb_hw;
		/* remember the first request we armed */
		if (!ret)
			ret = req;

		trb.bplh = req->request.dma;

		if (usb_endpoint_xfer_isoc(dep->desc)) {
			trb.isp_imi = true;
			trb.csp = true;
		} else {
			trb.lst = last_one;
		}

		switch (usb_endpoint_type(dep->desc)) {
		case USB_ENDPOINT_XFER_CONTROL:
			trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
			break;

		case USB_ENDPOINT_XFER_ISOC:
			trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/* IOC every DWC3_TRB_NUM / 4 so we can refill */
			if (!(cur_slot % (DWC3_TRB_NUM / 4)))
				trb.ioc = last_one;
			break;

		case USB_ENDPOINT_XFER_BULK:
		case USB_ENDPOINT_XFER_INT:
			trb.trbctl = DWC3_TRBCTL_NORMAL;
			break;
		default:
			/*
			 * This is only possible with faulty memory because we
			 * checked it already :)
			 */
			BUG();
		}

		trb.length	= req->request.length;
		trb.hwo = true;

		dwc3_trb_to_hw(&trb, trb_hw);
		req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);

		if (last_one)
			break;
	}

	return ret;
}
670
/*
 * __dwc3_gadget_kick_transfer - arm prepared TRBs in the hardware
 * @dep: endpoint to start or update
 * @cmd_param: transfer resource index for UPDATETRANSFER (0 for a start)
 * @start_new: nonzero to issue STARTTRANSFER, zero for UPDATETRANSFER
 *
 * Caller must hold dwc->lock. Returns 0 on success, -EBUSY if a new
 * transfer is requested while one is active, or the command's error.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		/*
		 * req points to the first request where HWO changed
		 * from 0 to 1
		 */
		req = dwc3_prepare_trbs(dep, start_new);
	}
	if (!req) {
		/* nothing armed; remember to kick once a request arrives */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
	params.param0.depstrtxfer.transfer_desc_addr_high =
		upper_32_bits(req->trb_dma);
	params.param1.depstrtxfer.transfer_desc_addr_low =
		lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	/* remember the resource index needed to stop this transfer later */
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);
	if (!dep->res_trans_idx)
		printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
	return 0;
}
741
742static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
743{
744 req->request.actual = 0;
745 req->request.status = -EINPROGRESS;
746 req->direction = dep->direction;
747 req->epnum = dep->number;
748
749 /*
750 * We only add to our list of requests now and
751 * start consuming the list once we get XferNotReady
752 * IRQ.
753 *
754 * That way, we avoid doing anything that we don't need
755 * to do now and defer it until the point we receive a
756 * particular token from the Host side.
757 *
758 * This will also avoid Host cancelling URBs due to too
759 * many NACKs.
760 */
761 dwc3_map_buffer_to_dma(req);
762 list_add_tail(&req->list, &dep->request_list);
763
764 /*
765 * There is one special case: XferNotReady with
766 * empty list of requests. We need to kick the
767 * transfer here in that situation, otherwise
768 * we will be NAKing forever.
769 *
770 * If we get XferNotReady before gadget driver
771 * has a chance to queue a request, we will ACK
772 * the IRQ but won't be able to receive the data
773 * until the next request is queued. The following
774 * code is handling exactly that.
775 */
776 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
777 int ret;
778 int start_trans;
779
780 start_trans = 1;
781 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
782 dep->flags & DWC3_EP_BUSY)
783 start_trans = 0;
784
785 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
786 if (ret && ret != -EBUSY) {
787 struct dwc3 *dwc = dep->dwc;
788
789 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
790 dep->name);
791 }
792 };
793
794 return 0;
795}
796
797static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
798 gfp_t gfp_flags)
799{
800 struct dwc3_request *req = to_dwc3_request(request);
801 struct dwc3_ep *dep = to_dwc3_ep(ep);
802 struct dwc3 *dwc = dep->dwc;
803
804 unsigned long flags;
805
806 int ret;
807
808 if (!dep->desc) {
809 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
810 request, ep->name);
811 return -ESHUTDOWN;
812 }
813
814 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
815 request, ep->name, request->length);
816
817 spin_lock_irqsave(&dwc->lock, flags);
818 ret = __dwc3_gadget_ep_queue(dep, req);
819 spin_unlock_irqrestore(&dwc->lock, flags);
820
821 return ret;
822}
823
/*
 * dwc3_gadget_ep_dequeue - usb_ep_ops ->dequeue
 * @ep: endpoint the request was queued on
 * @request: the request to cancel
 *
 * If the request is still pending it is given back with -ECONNRESET.
 * If the hardware already owns it, the active transfer is stopped and
 * completion is left to the stop path. Unknown requests get -EINVAL.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	/* first look among the requests not yet handed to the hardware */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* not pending; check the requests already queued in HW */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
867
868int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
869{
870 struct dwc3_gadget_ep_cmd_params params;
871 struct dwc3 *dwc = dep->dwc;
872 int ret;
873
874 memset(&params, 0x00, sizeof(params));
875
876 if (value) {
Felipe Balbiaa7b4d02011-08-30 15:48:08 +0300877 if (dep->number == 0 || dep->number == 1) {
878 /*
879 * Whenever EP0 is stalled, we will restart
880 * the state machine, thus moving back to
881 * Setup Phase
882 */
883 dwc->ep0state = EP0_SETUP_PHASE;
884 }
Felipe Balbi4dc64e52011-08-19 18:10:58 +0300885
886 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
887 DWC3_DEPCMD_SETSTALL, &params);
888 if (ret)
889 dev_err(dwc->dev, "failed to %s STALL on %s\n",
890 value ? "set" : "clear",
891 dep->name);
892 else
893 dep->flags |= DWC3_EP_STALL;
894 } else {
895 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
896 DWC3_DEPCMD_CLEARSTALL, &params);
897 if (ret)
898 dev_err(dwc->dev, "failed to %s STALL on %s\n",
899 value ? "set" : "clear",
900 dep->name);
901 else
902 dep->flags &= ~DWC3_EP_STALL;
903 }
904 return ret;
905}
906
907static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
908{
909 struct dwc3_ep *dep = to_dwc3_ep(ep);
910 struct dwc3 *dwc = dep->dwc;
911
912 unsigned long flags;
913
914 int ret;
915
916 spin_lock_irqsave(&dwc->lock, flags);
917
918 if (usb_endpoint_xfer_isoc(dep->desc)) {
919 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
920 ret = -EINVAL;
921 goto out;
922 }
923
924 ret = __dwc3_gadget_ep_set_halt(dep, value);
925out:
926 spin_unlock_irqrestore(&dwc->lock, flags);
927
928 return ret;
929}
930
931static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
932{
933 struct dwc3_ep *dep = to_dwc3_ep(ep);
934
935 dep->flags |= DWC3_EP_WEDGE;
936
937 return usb_ep_set_halt(ep);
938}
939
940/* -------------------------------------------------------------------------- */
941
/*
 * Descriptor for the default control endpoint. Only the type fields
 * are fixed here; NOTE(review): remaining fields (wMaxPacketSize etc.)
 * are presumably filled in elsewhere at runtime — confirm.
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
947
/*
 * Ops for ep0: enable/disable are rejected (ep0 is driver-managed) and
 * queuing goes through the dedicated ep0 queue handler; the remaining
 * operations are shared with regular endpoints.
 */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
958
/* Ops for all non-control endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
969
970/* -------------------------------------------------------------------------- */
971
972static int dwc3_gadget_get_frame(struct usb_gadget *g)
973{
974 struct dwc3 *dwc = gadget_to_dwc(g);
975 u32 reg;
976
977 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
978 return DWC3_DSTS_SOFFN(reg);
979}
980
/*
 * dwc3_gadget_wakeup - usb_gadget_ops ->wakeup (remote wakeup request)
 * @g: the gadget
 *
 * Only valid while suspended in HS/FS/LS (not SuperSpeed, per this
 * driver's policy here). Requests link-state Recovery, then polls up
 * to 100ms for the link to return to U0/ON. Returns 0 on success or
 * -EINVAL when the current speed/link state forbids the request or the
 * link never recovers.
 */
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	unsigned long timeout;
	unsigned long flags;

	u32 reg;

	int ret = 0;

	u8 link_state;
	u8 speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);

	/*
	 * Switch link state to Recovery. In HS/FS/LS this means
	 * RemoteWakeup Request
	 */
	reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for at least 2000us */
	usleep_range(2000, 2500);

	/* write zeroes to Link Change Request */
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!(time_after(jiffies, timeout))) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1062
1063static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1064 int is_selfpowered)
1065{
1066 struct dwc3 *dwc = gadget_to_dwc(g);
1067
1068 dwc->is_selfpowered = !!is_selfpowered;
1069
1070 return 0;
1071}
1072
1073static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1074{
1075 u32 reg;
Sebastian Andrzej Siewior6062cac2011-08-29 16:46:38 +02001076 u32 timeout = 500;
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001077
1078 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1079 if (is_on)
1080 reg |= DWC3_DCTL_RUN_STOP;
1081 else
1082 reg &= ~DWC3_DCTL_RUN_STOP;
1083
1084 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1085
1086 do {
1087 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1088 if (is_on) {
1089 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1090 break;
1091 } else {
1092 if (reg & DWC3_DSTS_DEVCTRLHLT)
1093 break;
1094 }
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001095 timeout--;
1096 if (!timeout)
1097 break;
Sebastian Andrzej Siewior6062cac2011-08-29 16:46:38 +02001098 udelay(1);
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001099 } while (1);
1100
1101 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1102 dwc->gadget_driver
1103 ? dwc->gadget_driver->function : "no-function",
1104 is_on ? "connect" : "disconnect");
1105}
1106
1107static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1108{
1109 struct dwc3 *dwc = gadget_to_dwc(g);
1110 unsigned long flags;
1111
1112 is_on = !!is_on;
1113
1114 spin_lock_irqsave(&dwc->lock, flags);
1115 dwc3_gadget_run_stop(dwc, is_on);
1116 spin_unlock_irqrestore(&dwc->lock, flags);
1117
1118 return 0;
1119}
1120
/*
 * dwc3_gadget_start - bind a function driver and bring up the device side
 * @g: the gadget being bound
 * @driver: the gadget (function) driver wanting to run on this UDC
 *
 * Called by the UDC core as the .udc_start hook. Forces the core into
 * device mode, programs SuperSpeed as the maximum speed, enables both
 * directions of physical endpoint 0 and arms ep0 for the first SETUP
 * packet.
 *
 * Returns 0 on success, -EBUSY if a driver is already bound, or the
 * error from enabling ep0.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct dwc3_ep *dep;
	unsigned long flags;
	int ret = 0;
	u32 reg;

	spin_lock_irqsave(&dwc->lock, flags);

	/* only one function driver may own the controller at a time */
	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver = driver;
	dwc->gadget.dev.driver = &driver->driver;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);

	/*
	 * Clear clock scale-down and scrambling-disable, and switch the
	 * port capability from OTG to pure device mode.
	 */
	reg &= ~DWC3_GCTL_SCALEDOWN(3);
	reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
	reg &= ~DWC3_GCTL_DISSCRAMBLE;
	reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * when the device fails to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter in a Connect/Disconnect loop
	 */
	if (dwc->revision < DWC3_REVISION_190A)
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	/* advertise SuperSpeed as our maximum speed */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= DWC3_DCFG_SUPERSPEED;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1199
1200static int dwc3_gadget_stop(struct usb_gadget *g,
1201 struct usb_gadget_driver *driver)
1202{
1203 struct dwc3 *dwc = gadget_to_dwc(g);
1204 unsigned long flags;
1205
1206 spin_lock_irqsave(&dwc->lock, flags);
1207
1208 __dwc3_gadget_ep_disable(dwc->eps[0]);
1209 __dwc3_gadget_ep_disable(dwc->eps[1]);
1210
1211 dwc->gadget_driver = NULL;
1212 dwc->gadget.dev.driver = NULL;
1213
1214 spin_unlock_irqrestore(&dwc->lock, flags);
1215
1216 return 0;
1217}
/* usb_gadget_ops glue: how the UDC core drives this controller */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};
1226
1227/* -------------------------------------------------------------------------- */
1228
1229static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1230{
1231 struct dwc3_ep *dep;
1232 u8 epnum;
1233
1234 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1235
1236 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1237 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1238 if (!dep) {
1239 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1240 epnum);
1241 return -ENOMEM;
1242 }
1243
1244 dep->dwc = dwc;
1245 dep->number = epnum;
1246 dwc->eps[epnum] = dep;
1247
1248 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1249 (epnum & 1) ? "in" : "out");
1250 dep->endpoint.name = dep->name;
1251 dep->direction = (epnum & 1);
1252
1253 if (epnum == 0 || epnum == 1) {
1254 dep->endpoint.maxpacket = 512;
1255 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1256 if (!epnum)
1257 dwc->gadget.ep0 = &dep->endpoint;
1258 } else {
1259 int ret;
1260
1261 dep->endpoint.maxpacket = 1024;
1262 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1263 list_add_tail(&dep->endpoint.ep_list,
1264 &dwc->gadget.ep_list);
1265
1266 ret = dwc3_alloc_trb_pool(dep);
1267 if (ret) {
1268 dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
1269 return ret;
1270 }
1271 }
1272 INIT_LIST_HEAD(&dep->request_list);
1273 INIT_LIST_HEAD(&dep->req_queued);
1274 }
1275
1276 return 0;
1277}
1278
1279static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1280{
1281 struct dwc3_ep *dep;
1282 u8 epnum;
1283
1284 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1285 dep = dwc->eps[epnum];
1286 dwc3_free_trb_pool(dep);
1287
1288 if (epnum != 0 && epnum != 1)
1289 list_del(&dep->endpoint.ep_list);
1290
1291 kfree(dep);
1292 }
1293}
1294
/*
 * Release callback for the embedded gadget device. The struct device
 * lives inside struct dwc3 (dwc->gadget.dev) and is freed with it, so
 * there is nothing to release here; the callback only exists to satisfy
 * the driver core.
 */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}
1299
1300/* -------------------------------------------------------------------------- */
1301static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1302 const struct dwc3_event_depevt *event, int status)
1303{
1304 struct dwc3_request *req;
1305 struct dwc3_trb trb;
1306 unsigned int count;
1307 unsigned int s_pkt = 0;
1308
1309 do {
1310 req = next_request(&dep->req_queued);
1311 if (!req)
1312 break;
1313
1314 dwc3_trb_to_nat(req->trb, &trb);
1315
Sebastian Andrzej Siewior679dc462011-08-19 19:59:12 +02001316 if (trb.hwo && status != -ESHUTDOWN)
1317 /*
1318 * We continue despite the error. There is not much we
1319 * can do. If we don't clean in up we loop for ever. If
1320 * we skip the TRB than it gets overwritten reused after
1321 * a while since we use them in a ring buffer. a BUG()
1322 * would help. Lets hope that if this occures, someone
1323 * fixes the root cause instead of looking away :)
1324 */
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001325 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1326 dep->name, req->trb);
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001327 count = trb.length;
1328
1329 if (dep->direction) {
1330 if (count) {
1331 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1332 dep->name);
1333 status = -ECONNRESET;
1334 }
1335 } else {
1336 if (count && (event->status & DEPEVT_STATUS_SHORT))
1337 s_pkt = 1;
1338 }
1339
1340 /*
1341 * We assume here we will always receive the entire data block
1342 * which we should receive. Meaning, if we program RX to
1343 * receive 4K but we receive only 2K, we assume that's all we
1344 * should receive and we simply bounce the request back to the
1345 * gadget driver for further processing.
1346 */
1347 req->request.actual += req->request.length - count;
1348 dwc3_gadget_giveback(dep, req, status);
1349 if (s_pkt)
1350 break;
1351 if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
1352 break;
1353 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1354 break;
1355 } while (1);
1356
1357 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1358 return 0;
1359 return 1;
1360}
1361
1362static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1363 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1364 int start_new)
1365{
1366 unsigned status = 0;
1367 int clean_busy;
1368
1369 if (event->status & DEPEVT_STATUS_BUSERR)
1370 status = -ECONNRESET;
1371
1372 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
Sebastian Andrzej Siewior4df39772011-08-22 17:42:18 +02001373 if (clean_busy) {
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001374 dep->flags &= ~DWC3_EP_BUSY;
Sebastian Andrzej Siewior4df39772011-08-22 17:42:18 +02001375 dep->res_trans_idx = 0;
1376 }
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001377}
1378
1379static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1380 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1381{
1382 u32 uf;
1383
1384 if (list_empty(&dep->request_list)) {
1385 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1386 dep->name);
1387 return;
1388 }
1389
1390 if (event->parameters) {
1391 u32 mask;
1392
1393 mask = ~(dep->interval - 1);
1394 uf = event->parameters & mask;
1395 /* 4 micro frames in the future */
1396 uf += dep->interval * 4;
1397 } else {
1398 uf = 0;
1399 }
1400
1401 __dwc3_gadget_kick_transfer(dep, uf, 1);
1402}
1403
/*
 * dwc3_process_ep_cmd_complete - finish an End Transfer command
 * @dep: the endpoint the command completed on
 * @event: the original command-complete event
 */
static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and have the same
	 * transfer index. Since we stopped the whole endpoint we don't
	 * know how many requests were already completed (and not yet
	 * reported) and how many could still be finished later. We purge
	 * them all until the end of the list, forcing "last" semantics by
	 * faking an LST status on a copy of the event.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requests are ignored and are queued on XferNotReady */
}
1423
1424static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1425 const struct dwc3_event_depevt *event)
1426{
1427 u32 param = event->parameters;
1428 u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1429
1430 switch (cmd_type) {
1431 case DWC3_DEPCMD_ENDTRANSFER:
1432 dwc3_process_ep_cmd_complete(dep, event);
1433 break;
1434 case DWC3_DEPCMD_STARTTRANSFER:
1435 dep->res_trans_idx = param & 0x7f;
1436 break;
1437 default:
1438 printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1439 __func__, cmd_type);
1440 break;
1441 };
1442}
1443
/*
 * dwc3_endpoint_interrupt - dispatch one endpoint event
 * @dwc: controller context
 * @event: the endpoint event to handle
 *
 * Events for the two physical control endpoints (0 and 1) go straight
 * to the ep0 handler; everything else is dispatched here based on the
 * event type.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		/* XferComplete is only handled for non-isochronous eps */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		/* conversely, XferInProgress is isochronous-only */
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status
					? "Transfer Active"
					: "Transfer Not Active");

			/* -EBUSY just means a transfer is already running */
			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_STREAMEVT:
		dev_dbg(dwc->dev, "%s Stream Event\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}
1510
1511static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1512{
1513 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1514 spin_unlock(&dwc->lock);
1515 dwc->gadget_driver->disconnect(&dwc->gadget);
1516 spin_lock(&dwc->lock);
1517 }
1518}
1519
1520static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1521{
1522 struct dwc3_ep *dep;
1523 struct dwc3_gadget_ep_cmd_params params;
1524 u32 cmd;
1525 int ret;
1526
1527 dep = dwc->eps[epnum];
1528
Sebastian Andrzej Siewiorb55db3b2011-08-29 13:56:37 +02001529 WARN_ON(!dep->res_trans_idx);
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001530 if (dep->res_trans_idx) {
1531 cmd = DWC3_DEPCMD_ENDTRANSFER;
1532 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1533 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1534 memset(&params, 0, sizeof(params));
1535 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1536 WARN_ON_ONCE(ret);
Sebastian Andrzej Siewior4df39772011-08-22 17:42:18 +02001537 dep->res_trans_idx = 0;
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001538 }
1539}
1540
1541static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1542{
1543 u32 epnum;
1544
1545 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1546 struct dwc3_ep *dep;
1547
1548 dep = dwc->eps[epnum];
1549 if (!(dep->flags & DWC3_EP_ENABLED))
1550 continue;
1551
Sebastian Andrzej Siewiorb55db3b2011-08-29 13:56:37 +02001552 dwc3_remove_requests(dwc, dep);
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001553 }
1554}
1555
1556static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1557{
1558 u32 epnum;
1559
1560 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1561 struct dwc3_ep *dep;
1562 struct dwc3_gadget_ep_cmd_params params;
1563 int ret;
1564
1565 dep = dwc->eps[epnum];
1566
1567 if (!(dep->flags & DWC3_EP_STALL))
1568 continue;
1569
1570 dep->flags &= ~DWC3_EP_STALL;
1571
1572 memset(&params, 0, sizeof(params));
1573 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1574 DWC3_DEPCMD_CLEARSTALL, &params);
1575 WARN_ON_ONCE(ret);
1576 }
1577}
1578
/*
 * dwc3_gadget_disconnect_interrupt - handle a Disconnect Detected event
 * @dwc: controller context
 *
 * Cancels all active transfers, tells the gadget driver the host is
 * gone, and marks the connection speed as unknown again.
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
}
1600
1601static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1602{
1603 u32 reg;
1604
1605 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1606
1607 if (on)
1608 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1609 else
1610 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1611
1612 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1613}
1614
1615static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1616{
1617 u32 reg;
1618
1619 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1620
1621 if (on)
1622 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1623 else
1624 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1625
1626 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1627}
1628
/*
 * dwc3_gadget_reset_interrupt - handle a USB Reset event
 * @dwc: controller context
 *
 * Powers both PHYs back up, notifies the gadget driver of the implied
 * disconnect (if we were connected), clears test-mode bits, cancels all
 * active transfers, clears stalled endpoints, resets the device address
 * to zero and finally busy-waits for the RX FIFO to drain.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	/* a reset while connected is also a disconnect for the driver */
	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* clear any test mode left programmed in DCTL */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/*
	 * Wait for RxFifo to drain
	 *
	 * REVISIT probably shouldn't wait forever.
	 * In case Hardware ends up in a screwed up
	 * case, we error out, notify the user and,
	 * maybe, WARN() or BUG() but leave the rest
	 * of the kernel working fine.
	 *
	 * REVISIT the below is rather CPU intensive,
	 * maybe we should read and if it doesn't work
	 * sleep (not busy wait) for a few useconds.
	 *
	 * REVISIT why wait until the RXFIFO is empty anyway?
	 */
	while (!(dwc3_readl(dwc->regs, DWC3_DSTS)
			& DWC3_DSTS_RXFIFOEMPTY))
		cpu_relax();
}
1673
/*
 * dwc3_update_ram_clk_sel - reprogram GCTL.RAMClkSel on Connect Done
 * @dwc: controller context
 * @speed: negotiated speed in DWC3_DSTS_* encoding
 *
 * Selects the bus clock for the controller RAMs, but only when a
 * SuperSpeed connection completed; other speeds keep the reset default.
 */
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 * (If the selected clock value is 0 there is nothing to OR in.)
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
1698
1699static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1700{
1701 switch (speed) {
1702 case USB_SPEED_SUPER:
1703 dwc3_gadget_usb2_phy_power(dwc, false);
1704 break;
1705 case USB_SPEED_HIGH:
1706 case USB_SPEED_FULL:
1707 case USB_SPEED_LOW:
1708 dwc3_gadget_usb3_phy_power(dwc, false);
1709 break;
1710 }
1711}
1712
/*
 * dwc3_gadget_conndone_interrupt - handle a Connection Done event
 * @dwc: controller context
 *
 * Reads the negotiated speed from DSTS, reprograms the RAM clock select
 * (SuperSpeed only), updates the gadget speed and ep0 max packet size
 * accordingly, suspends the unused PHY and re-enables both directions
 * of endpoint 0 with the new descriptor.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	memset(&params, 0x00, sizeof(params));

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	/* translate the hardware speed into gadget speed + ep0 maxpacket */
	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Disable unneeded PHY */
	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
1780
1781static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
1782{
1783 dev_vdbg(dwc->dev, "%s\n", __func__);
1784
1785 /*
1786 * TODO take core out of low power mode when that's
1787 * implemented.
1788 */
1789
1790 dwc->gadget_driver->resume(&dwc->gadget);
1791}
1792
/*
 * dwc3_gadget_linksts_change_interrupt - cache the new USB link state
 * @dwc: controller context
 * @evtinfo: event info field from the Link Status Change event
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	/*
	 * The fifth bit says SuperSpeed yes or no; the low bits (masked
	 * below) carry the new link state itself.
	 */
	dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
1801
/*
 * dwc3_gadget_interrupt - dispatch one device-specific event
 * @dwc: controller context
 * @event: the device event to handle
 *
 * Routes the event to its dedicated handler; informational events are
 * only logged at verbose-debug level.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
1840
1841static void dwc3_process_event_entry(struct dwc3 *dwc,
1842 const union dwc3_event *event)
1843{
1844 /* Endpoint IRQ, handle it and return early */
1845 if (event->type.is_devspec == 0) {
1846 /* depevt */
1847 return dwc3_endpoint_interrupt(dwc, &event->depevt);
1848 }
1849
1850 switch (event->type.type) {
1851 case DWC3_EVENT_TYPE_DEV:
1852 dwc3_gadget_interrupt(dwc, &event->devt);
1853 break;
1854 /* REVISIT what to do with Carkit and I2C events ? */
1855 default:
1856 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
1857 }
1858}
1859
/*
 * dwc3_process_event_buf - drain one hardware event buffer
 * @dwc: controller context
 * @buf: index of the event buffer to service
 *
 * Reads how many event bytes are pending from GEVNTCOUNT, processes
 * them one 4-byte entry at a time and acknowledges each entry back to
 * the hardware.
 *
 * Returns IRQ_HANDLED if any events were consumed, IRQ_NONE otherwise.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if were get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* ack this entry so the hardware can reuse its slot */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
1894
1895static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
1896{
1897 struct dwc3 *dwc = _dwc;
1898 int i;
1899 irqreturn_t ret = IRQ_NONE;
1900
1901 spin_lock(&dwc->lock);
1902
1903 for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
1904 irqreturn_t status;
1905
1906 status = dwc3_process_event_buf(dwc, i);
1907 if (status == IRQ_HANDLED)
1908 ret = status;
1909 }
1910
1911 spin_unlock(&dwc->lock);
1912
1913 return ret;
1914}
1915
1916/**
1917 * dwc3_gadget_init - Initializes gadget related registers
1918 * @dwc: Pointer to out controller context structure
1919 *
1920 * Returns 0 on success otherwise negative errno.
1921 */
1922int __devinit dwc3_gadget_init(struct dwc3 *dwc)
1923{
1924 u32 reg;
1925 int ret;
1926 int irq;
1927
1928 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
1929 &dwc->ctrl_req_addr, GFP_KERNEL);
1930 if (!dwc->ctrl_req) {
1931 dev_err(dwc->dev, "failed to allocate ctrl request\n");
1932 ret = -ENOMEM;
1933 goto err0;
1934 }
1935
1936 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
1937 &dwc->ep0_trb_addr, GFP_KERNEL);
1938 if (!dwc->ep0_trb) {
1939 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
1940 ret = -ENOMEM;
1941 goto err1;
1942 }
1943
1944 dwc->setup_buf = dma_alloc_coherent(dwc->dev,
1945 sizeof(*dwc->setup_buf) * 2,
1946 &dwc->setup_buf_addr, GFP_KERNEL);
1947 if (!dwc->setup_buf) {
1948 dev_err(dwc->dev, "failed to allocate setup buffer\n");
1949 ret = -ENOMEM;
1950 goto err2;
1951 }
1952
Felipe Balbi64e96342011-08-27 22:07:53 +03001953 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
1954 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
1955 if (!dwc->ep0_bounce) {
1956 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
1957 ret = -ENOMEM;
1958 goto err3;
1959 }
1960
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001961 dev_set_name(&dwc->gadget.dev, "gadget");
1962
1963 dwc->gadget.ops = &dwc3_gadget_ops;
1964 dwc->gadget.is_dualspeed = true;
1965 dwc->gadget.speed = USB_SPEED_UNKNOWN;
1966 dwc->gadget.dev.parent = dwc->dev;
1967
1968 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
1969
1970 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
1971 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
1972 dwc->gadget.dev.release = dwc3_gadget_release;
1973 dwc->gadget.name = "dwc3-gadget";
1974
1975 /*
1976 * REVISIT: Here we should clear all pending IRQs to be
1977 * sure we're starting from a well known location.
1978 */
1979
1980 ret = dwc3_gadget_init_endpoints(dwc);
1981 if (ret)
Felipe Balbi64e96342011-08-27 22:07:53 +03001982 goto err4;
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001983
1984 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1985
1986 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
1987 "dwc3", dwc);
1988 if (ret) {
1989 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1990 irq, ret);
Felipe Balbi64e96342011-08-27 22:07:53 +03001991 goto err5;
Felipe Balbi4dc64e52011-08-19 18:10:58 +03001992 }
1993
1994 /* Enable all but Start and End of Frame IRQs */
1995 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1996 DWC3_DEVTEN_EVNTOVERFLOWEN |
1997 DWC3_DEVTEN_CMDCMPLTEN |
1998 DWC3_DEVTEN_ERRTICERREN |
1999 DWC3_DEVTEN_WKUPEVTEN |
2000 DWC3_DEVTEN_ULSTCNGEN |
2001 DWC3_DEVTEN_CONNECTDONEEN |
2002 DWC3_DEVTEN_USBRSTEN |
2003 DWC3_DEVTEN_DISCONNEVTEN);
2004 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2005
2006 ret = device_register(&dwc->gadget.dev);
2007 if (ret) {
2008 dev_err(dwc->dev, "failed to register gadget device\n");
2009 put_device(&dwc->gadget.dev);
Felipe Balbi64e96342011-08-27 22:07:53 +03002010 goto err6;
Felipe Balbi4dc64e52011-08-19 18:10:58 +03002011 }
2012
2013 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2014 if (ret) {
2015 dev_err(dwc->dev, "failed to register udc\n");
Felipe Balbi64e96342011-08-27 22:07:53 +03002016 goto err7;
Felipe Balbi4dc64e52011-08-19 18:10:58 +03002017 }
2018
2019 return 0;
2020
Felipe Balbi64e96342011-08-27 22:07:53 +03002021err7:
Felipe Balbi4dc64e52011-08-19 18:10:58 +03002022 device_unregister(&dwc->gadget.dev);
2023
Felipe Balbi64e96342011-08-27 22:07:53 +03002024err6:
Felipe Balbi4dc64e52011-08-19 18:10:58 +03002025 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2026 free_irq(irq, dwc);
2027
Felipe Balbi64e96342011-08-27 22:07:53 +03002028err5:
Felipe Balbi4dc64e52011-08-19 18:10:58 +03002029 dwc3_gadget_free_endpoints(dwc);
2030
Felipe Balbi64e96342011-08-27 22:07:53 +03002031err4:
2032 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2033 dwc->ep0_bounce_addr);
2034
Felipe Balbi4dc64e52011-08-19 18:10:58 +03002035err3:
2036 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2037 dwc->setup_buf, dwc->setup_buf_addr);
2038
2039err2:
2040 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2041 dwc->ep0_trb, dwc->ep0_trb_addr);
2042
2043err1:
2044 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2045 dwc->ctrl_req, dwc->ctrl_req_addr);
2046
2047err0:
2048 return ret;
2049}
2050
/**
 * dwc3_gadget_exit - tear down everything dwc3_gadget_init() set up
 * @dwc: controller context
 *
 * Mirrors dwc3_gadget_init() in reverse: unregister from the UDC core,
 * mask device events and release the interrupt, disable and free all
 * endpoints, free the DMA-coherent ep0 buffers and finally unregister
 * the gadget device.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int irq;
	int i;

	usb_del_gadget_udc(&dwc->gadget);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	/* mask all device events before giving the IRQ line back */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
		__dwc3_gadget_ep_disable(dwc->eps[i]);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}