/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

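/*
 * dwc3_map_buffer_to_dma - map a request buffer for DMA
 *
 * Zero-length requests need no mapping. For everything else we only map
 * the buffer ourselves (and remember that in req->mapped) when the gadget
 * driver has not already provided a DMA address.
 */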
void dwc3_map_buffer_to_dma(struct dwc3_request *req)
{
	struct dwc3 *dwc = req->dep->dwc;

	if (req->request.length == 0) {
		/* req->request.dma = dwc->setup_buf_addr; */
		return;
	}

	if (req->request.dma == DMA_ADDR_INVALID) {
		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
				req->request.length, req->direction
				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = true;
	}
}

void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
{
	struct dwc3 *dwc = req->dep->dwc;

	if (req->request.length == 0) {
		req->request.dma = DMA_ADDR_INVALID;
		return;
	}

	if (req->mapped) {
		dma_unmap_single(dwc->dev, req->request.dma,
				req->request.length, req->direction
				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 0;
		req->request.dma = DMA_ADDR_INVALID;
	}
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		dep->busy_slot++;
		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
		 * just completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETSEQNUMBER:
		return "Get Data Sequence Number";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

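/*
 * dwc3_trb_dma_offset - compute the bus address of a TRB
 *
 * TRBs live in a coherent pool allocated per endpoint; the hardware needs
 * the DMA address, so we translate the TRB's offset within the pool into
 * an offset from trb_pool_dma.
 */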
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb_hw *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

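/*
 * dwc3_gadget_start_config - issue the Start New Configuration command
 *
 * For ep0 this is sent with XferRscIdx 0; for all other endpoints it is
 * sent only once per configuration (tracked in dwc->start_config_issued)
 * with XferRscIdx 2. The command itself is always issued on physical
 * endpoint 0; ep1 never triggers it.
 */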
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb_hw *trb_st_hw;
		struct dwc3_trb_hw *trb_link_hw;
		struct dwc3_trb trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->desc = desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
		trb_link.hwo = true;

		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
		dwc3_trb_to_hw(&trb_link, trb_link_hw);
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued))
		dwc3_stop_active_transfer(dwc, dep->number);

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strncat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strncat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strncat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strncat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum = dep->number;
	req->dep = dep;
	req->request.dma = DMA_ADDR_INVALID;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned last)
{
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;

	unsigned int cur_slot;

	trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return;

	dwc3_gadget_move_request_queued(req);
	memset(&trb, 0, sizeof(trb));

	req->trb = trb_hw;

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		trb.isp_imi = true;
		trb.csp = true;
	} else {
		trb.lst = last;
	}

	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb.sid_sofn = req->request.stream_id;

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb.ioc = last;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb.trbctl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	trb.length = req->request.length;
	trb.bplh = req->request.dma;
	trb.hwo = true;

	dwc3_trb_to_hw(&trb, trb_hw);
	req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the request list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available
 * or it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/*
	 * If busy and free slots are equal, the ring is either full or empty.
	 * If we are starting to process requests then it is empty; otherwise
	 * it is full and we don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * the IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1
		 * and have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		trbs_left--;

		if (!trbs_left)
			last_one = 1;

		/* Is this the last request? */
		if (list_empty(&dep->request_list))
			last_one = 1;

		/*
		 * FIXME we shouldn't need to set LST bit always but we are
		 * facing some weird problem with the Hardware where it doesn't
		 * complete even though it has been previously started.
		 *
		 * While we're debugging the problem, as a workaround to
		 * multiple TRBs handling, use only one TRB at a time.
		 */
		dwc3_prepare_one_trb(dep, req, true);
		break;
	}
}

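/*
 * __dwc3_gadget_kick_transfer - start or update a transfer on an endpoint
 *
 * Prepares TRBs from the request list, then issues either STARTTRANSFER
 * (when no transfer is in flight) or UPDATETRANSFER, pointing the hardware
 * at the TRB of the first queued request. On STARTTRANSFER the returned
 * transfer resource index is saved in dep->res_trans_idx so the transfer
 * can be stopped later.
 */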
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed
		 * from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);
	if (!dep->res_trans_idx)
		printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
	return 0;
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	dwc3_map_buffer_to_dma(req);
	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There is one special case: XferNotReady with
	 * empty list of requests. We need to kick the
	 * transfer here in that situation, otherwise
	 * we will be NAKing forever.
	 *
	 * If we get XferNotReady before gadget driver
	 * has a chance to queue a request, we will ACK
	 * the IRQ but won't be able to receive the data
	 * until the next request is queued. The following
	 * code is handling exactly that.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		int ret;
		int start_trans;

		start_trans = 1;
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
				dep->flags & DWC3_EP_BUSY)
			start_trans = 0;

		ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
		if (ret && ret != -EBUSY) {
			struct dwc3 *dwc = dep->dwc;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	if (!dep->desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (dep->number == 0 || dep->number == 1) {
			/*
			 * Whenever EP0 is stalled, we will restart
			 * the state machine, thus moving back to
			 * Setup Phase
			 */
			dwc->ep0state = EP0_SETUP_PHASE;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		if (dep->flags & DWC3_EP_WEDGE)
			return 0;

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags &= ~DWC3_EP_STALL;
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		ret = -EINVAL;
		goto out;
	}

	ret = __dwc3_gadget_ep_set_halt(dep, value);
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	dep->flags |= DWC3_EP_WEDGE;

	return dwc3_gadget_ep_set_halt(ep, 1);
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	unsigned long timeout;
	unsigned long flags;

	u32 reg;

	int ret = 0;

	u8 link_state;
	u8 speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook, the Remote Wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);

	/*
	 * Switch link state to Recovery. In HS/FS/LS this means
	 * RemoteWakeup Request
	 */
	reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for at least 2000us */
	usleep_range(2000, 2500);

	/* write zeroes to Link Change Request */
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!(time_after(jiffies, timeout))) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	dwc->is_selfpowered = !!is_selfpowered;

	return 0;
}

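/*
 * dwc3_gadget_run_stop - connect or disconnect the data pullups
 *
 * Sets or clears DCTL.RUN_STOP and then polls DSTS.DEVCTRLHLT (with a
 * bounded busy-wait, since this may run with the lock held) until the
 * controller reports that it has actually started or halted.
 */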
static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on)
		reg |= DWC3_DCTL_RUN_STOP;
	else
		reg &= ~DWC3_DCTL_RUN_STOP;

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			break;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_gadget_run_stop(dwc, is_on);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct dwc3_ep *dep;
	unsigned long flags;
	int ret = 0;
	u32 reg;

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver = driver;
	dwc->gadget.dev.driver = &driver->driver;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);

	reg &= ~DWC3_GCTL_SCALEDOWN(3);
	reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
	reg &= ~DWC3_GCTL_DISSCRAMBLE;
	reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);

	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	default:
		dev_dbg(dwc->dev, "No power optimization available\n");
	}

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * where the device fails to connect at SuperSpeed
	 * and falls back to high-speed mode, which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (dwc->revision < DWC3_REVISION_190A)
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= DWC3_DCFG_SUPERSPEED;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver = NULL;
	dwc->gadget.dev.driver = NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");
		dep->endpoint.name = dep->name;
		dep->direction = (epnum & 1);

		if (epnum == 0 || epnum == 1) {
			dep->endpoint.maxpacket = 512;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int ret;

			dep->endpoint.maxpacket = 1024;
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret) {
				dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
				return ret;
			}
		}
		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		dwc3_free_trb_pool(dep);

		if (epnum != 0 && epnum != 1)
			list_del(&dep->endpoint.ep_list);

		kfree(dep);
	}
}

static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}

/* -------------------------------------------------------------------------- */

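/*
 * dwc3_cleanup_done_reqs - give back requests whose TRBs have completed
 *
 * Walks the req_queued list, reads each completed TRB back from hardware
 * format and hands the request to the gadget driver, stopping at a short
 * packet or when the LST/IOC TRB for this event has been consumed.
 * Returns 0 when the IOC TRB for this event was reached, 1 otherwise (in
 * which case the caller clears DWC3_EP_BUSY).
 */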
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb trb;
	unsigned int count;
	unsigned int s_pkt = 0;

	do {
		req = next_request(&dep->req_queued);
		if (!req)
			break;

		dwc3_trb_to_nat(req->trb, &trb);

		if (trb.hwo && status != -ESHUTDOWN)
			/*
			 * We continue despite the error. There is not much we
			 * can do. If we don't clean it up we loop forever. If
			 * we skip the TRB then it gets overwritten and reused
			 * after a while since we use them in a ring buffer.
			 * A BUG() would help. Let's hope that if this occurs,
			 * someone fixes the root cause instead of looking
			 * away :)
			 */
			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
					dep->name, req->trb);
		count = trb.length;

		if (dep->direction) {
			if (count) {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			if (count && (event->status & DEPEVT_STATUS_SHORT))
				s_pkt = 1;
		}

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);
		if (s_pkt)
			break;
		if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
			break;
		if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
			break;
	} while (1);

	if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
		return 0;
	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned status = 0;
	int clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy) {
		dep->flags &= ~DWC3_EP_BUSY;
		dep->res_trans_idx = 0;
	}
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
				dep->name);
		return;
	}

	if (event->parameters) {
		u32 mask;

		mask = ~(dep->interval - 1);
		uf = event->parameters & mask;
		/* 4 micro frames in the future */
		uf += dep->interval * 4;
	} else {
		uf = 0;
	}

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and have the same
	 * transfer index. Since we stopped the whole endpoint we don't know
	 * how many requests were already completed (and not yet reported)
	 * and how many could still be completed later. We purge them all
	 * until the end of the list.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requests are ignored and are queued on XferNotReady */
}

static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	u32 param = event->parameters;
	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);

	switch (cmd_type) {
	case DWC3_DEPCMD_ENDTRANSFER:
		dwc3_process_ep_cmd_complete(dep, event);
		break;
	case DWC3_DEPCMD_STARTTRANSFER:
		dep->res_trans_idx = param & 0x7f;
		break;
	default:
		printk(KERN_ERR "%s() unknown/unexpected type: %d\n",
				__func__, cmd_type);
		break;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

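/*
 * dwc3_stop_active_transfer - abort whatever is in flight on an endpoint
 *
 * Issues ENDTRANSFER with ForceRM and CmdIOC set, using the transfer
 * resource index saved from the last STARTTRANSFER, then clears that
 * index so a new transfer can be started.
 */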
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	WARN_ON(!dep->res_trans_idx);
	if (dep->res_trans_idx) {
		cmd = DWC3_DEPCMD_ENDTRANSFER;
		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
		WARN_ON_ONCE(ret);
		dep->res_trans_idx = 0;
	}
}

static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}

static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
}

static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	if (on)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
	else
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
}

static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	if (on)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
	else
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}

static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
{
	switch (speed) {
	case USB_SPEED_SUPER:
		dwc3_gadget_usb2_phy_power(dwc, false);
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		dwc3_gadget_usb3_phy_power(dwc, false);
		break;
	}
}

static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	memset(&params, 0x00, sizeof(params));

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Disable unneeded PHY */
	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}

static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	dwc->gadget_driver->resume(&dwc->gadget);
}

static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	/* The fifth bit says SuperSpeed yes or no. */
	dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}

1833static void dwc3_gadget_interrupt(struct dwc3 *dwc,
1834 const struct dwc3_event_devt *event)
1835{
1836 switch (event->type) {
1837 case DWC3_DEVICE_EVENT_DISCONNECT:
1838 dwc3_gadget_disconnect_interrupt(dwc);
1839 break;
1840 case DWC3_DEVICE_EVENT_RESET:
1841 dwc3_gadget_reset_interrupt(dwc);
1842 break;
1843 case DWC3_DEVICE_EVENT_CONNECT_DONE:
1844 dwc3_gadget_conndone_interrupt(dwc);
1845 break;
1846 case DWC3_DEVICE_EVENT_WAKEUP:
1847 dwc3_gadget_wakeup_interrupt(dwc);
1848 break;
1849 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
1850 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
1851 break;
1852 case DWC3_DEVICE_EVENT_EOPF:
1853 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
1854 break;
1855 case DWC3_DEVICE_EVENT_SOF:
1856 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
1857 break;
1858 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
1859 dev_vdbg(dwc->dev, "Erratic Error\n");
1860 break;
1861 case DWC3_DEVICE_EVENT_CMD_CMPL:
1862 dev_vdbg(dwc->dev, "Command Complete\n");
1863 break;
1864 case DWC3_DEVICE_EVENT_OVERFLOW:
1865 dev_vdbg(dwc->dev, "Overflow\n");
1866 break;
1867 default:
1868 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
1869 }
1870}
1871
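/*
 * Demultiplex one event entry: endpoint events go to
 * dwc3_endpoint_interrupt(), device events to dwc3_gadget_interrupt().
 */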
1872static void dwc3_process_event_entry(struct dwc3 *dwc,
1873 const union dwc3_event *event)
1874{
1875 /* Endpoint IRQ, handle it and return early */
1876 if (event->type.is_devspec == 0) {
1877 /* depevt */
1878 return dwc3_endpoint_interrupt(dwc, &event->depevt);
1879 }
1880
1881 switch (event->type.type) {
1882 case DWC3_EVENT_TYPE_DEV:
1883 dwc3_gadget_interrupt(dwc, &event->devt);
1884 break;
1885 /* REVISIT what to do with Carkit and I2C events ? */
1886 default:
1887 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
1888 }
1889}
1890
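/*
 * Drain one event buffer. The hardware handshake is roughly:
 *
 *	count = GEVNTCOUNT(buf);	read number of pending bytes
 *	for each 4-byte entry:
 *		process the entry
 *		GEVNTCOUNT(buf) = 4;	acknowledge the consumed entry
 */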
1891static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
1892{
1893 struct dwc3_event_buffer *evt;
1894 int left;
1895 u32 count;
1896
1897 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
1898 count &= DWC3_GEVNTCOUNT_MASK;
1899 if (!count)
1900 return IRQ_NONE;
1901
1902 evt = dwc->ev_buffs[buf];
1903 left = count;
1904
1905 while (left > 0) {
1906 union dwc3_event event;
1907
1908 memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
1909 dwc3_process_event_entry(dwc, &event);
1910 /*
1911		 * XXX we wrap around correctly to the next entry as almost all
1912		 * entries are 4 bytes in size. There is one entry type which is
1913		 * 12 bytes long: a regular entry followed by 8 bytes of data.
1914		 * It is not yet clear how such an entry is laid out when it
1915		 * crosses the buffer boundary; handle that once we support it.
1916 */
1917 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
1918 left -= 4;
1919
1920 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
1921 }
1922
1923 return IRQ_HANDLED;
1924}
1925
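/*
 * Top level (shared) interrupt handler: walk every event buffer under
 * the controller lock and report IRQ_HANDLED if any of them had events.
 */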
1926static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
1927{
1928 struct dwc3 *dwc = _dwc;
1929 int i;
1930 irqreturn_t ret = IRQ_NONE;
1931
1932 spin_lock(&dwc->lock);
1933
1934 for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
1935 irqreturn_t status;
1936
1937 status = dwc3_process_event_buf(dwc, i);
1938 if (status == IRQ_HANDLED)
1939 ret = status;
1940 }
1941
1942 spin_unlock(&dwc->lock);
1943
1944 return ret;
1945}
1946
1947/**
1948 * dwc3_gadget_init - Initializes gadget related registers
1949 * @dwc: Pointer to our controller context structure
1950 *
1951 * Returns 0 on success, otherwise a negative errno.
1952 */
1953int __devinit dwc3_gadget_init(struct dwc3 *dwc)
1954{
1955 u32 reg;
1956 int ret;
1957 int irq;
1958
1959 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
1960 &dwc->ctrl_req_addr, GFP_KERNEL);
1961 if (!dwc->ctrl_req) {
1962 dev_err(dwc->dev, "failed to allocate ctrl request\n");
1963 ret = -ENOMEM;
1964 goto err0;
1965 }
1966
1967 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
1968 &dwc->ep0_trb_addr, GFP_KERNEL);
1969 if (!dwc->ep0_trb) {
1970 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
1971 ret = -ENOMEM;
1972 goto err1;
1973 }
1974
1975 dwc->setup_buf = dma_alloc_coherent(dwc->dev,
1976 sizeof(*dwc->setup_buf) * 2,
1977 &dwc->setup_buf_addr, GFP_KERNEL);
1978 if (!dwc->setup_buf) {
1979 dev_err(dwc->dev, "failed to allocate setup buffer\n");
1980 ret = -ENOMEM;
1981 goto err2;
1982 }
1983
1984	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
1985 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
1986 if (!dwc->ep0_bounce) {
1987 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
1988 ret = -ENOMEM;
1989 goto err3;
1990 }
1991
1992	dev_set_name(&dwc->gadget.dev, "gadget");
1993
1994 dwc->gadget.ops = &dwc3_gadget_ops;
1995	dwc->gadget.max_speed = USB_SPEED_SUPER;
1996	dwc->gadget.speed = USB_SPEED_UNKNOWN;
1997 dwc->gadget.dev.parent = dwc->dev;
1998
1999 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2000
2001 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
2002 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
2003 dwc->gadget.dev.release = dwc3_gadget_release;
2004 dwc->gadget.name = "dwc3-gadget";
2005
2006 /*
2007 * REVISIT: Here we should clear all pending IRQs to be
2008 * sure we're starting from a well known location.
2009 */
2010
2011 ret = dwc3_gadget_init_endpoints(dwc);
2012 if (ret)
2013		goto err4;
2014
2015 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2016
2017 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2018 "dwc3", dwc);
2019 if (ret) {
2020 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2021 irq, ret);
2022		goto err5;
2023	}
2024
2025 /* Enable all but Start and End of Frame IRQs */
2026 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2027 DWC3_DEVTEN_EVNTOVERFLOWEN |
2028 DWC3_DEVTEN_CMDCMPLTEN |
2029 DWC3_DEVTEN_ERRTICERREN |
2030 DWC3_DEVTEN_WKUPEVTEN |
2031 DWC3_DEVTEN_ULSTCNGEN |
2032 DWC3_DEVTEN_CONNECTDONEEN |
2033 DWC3_DEVTEN_USBRSTEN |
2034 DWC3_DEVTEN_DISCONNEVTEN);
2035 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2036
2037 ret = device_register(&dwc->gadget.dev);
2038 if (ret) {
2039 dev_err(dwc->dev, "failed to register gadget device\n");
2040 put_device(&dwc->gadget.dev);
2041		goto err6;
2042	}
2043
2044 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2045 if (ret) {
2046 dev_err(dwc->dev, "failed to register udc\n");
2047		goto err7;
2048	}
2049
2050 return 0;
2051
2052err7:
2053	device_unregister(&dwc->gadget.dev);
2054
2055err6:
2056	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2057 free_irq(irq, dwc);
2058
2059err5:
2060	dwc3_gadget_free_endpoints(dwc);
2061
2062err4:
2063 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2064 dwc->ep0_bounce_addr);
2065
Felipe Balbi72246da2011-08-19 18:10:58 +03002066err3:
2067 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2068 dwc->setup_buf, dwc->setup_buf_addr);
2069
2070err2:
2071 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2072 dwc->ep0_trb, dwc->ep0_trb_addr);
2073
2074err1:
2075 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2076 dwc->ctrl_req, dwc->ctrl_req_addr);
2077
2078err0:
2079 return ret;
2080}
2081
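/*
 * Tear down everything dwc3_gadget_init() set up, in roughly reverse
 * order: unregister the UDC, mask device interrupts and free the IRQ,
 * disable and free the endpoints, release the coherent buffers and
 * finally unregister the gadget device.
 */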
2082void dwc3_gadget_exit(struct dwc3 *dwc)
2083{
2084 int irq;
2085 int i;
2086
2087 usb_del_gadget_udc(&dwc->gadget);
2088 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2089
2090 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2091 free_irq(irq, dwc);
2092
2093 for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
2094 __dwc3_gadget_ep_disable(dwc->eps[i]);
2095
2096 dwc3_gadget_free_endpoints(dwc);
2097
2098	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2099 dwc->ep0_bounce_addr);
2100
2101	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2102 dwc->setup_buf, dwc->setup_buf_addr);
2103
2104 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2105 dwc->ep0_trb, dwc->ep0_trb_addr);
2106
2107 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2108 dwc->ctrl_req, dwc->ctrl_req_addr);
2109
2110 device_unregister(&dwc->gadget.dev);
2111}