/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a USB high-speed, DMA-capable USB device controller. Besides ep0
 * it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that the UDC is assigned to port 4 by the BIOS settings (the
 * port can also be used as a host port) and that the UOC bits PAD_EN and
 * APU are set (should be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
 * not work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
 * param "use_dma=0") can be used with gadget ether.
 */

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206 - $Revision: #3 $"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"

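/*
 * Bitfield helpers used throughout this file come from amd5536udc.h.
 * A minimal sketch of their assumed semantics (the header is the
 * authoritative source; its field masks encode offset and width):
 *
 *	AMD_BIT(bit)			- single-bit mask, roughly 1 << (bit)
 *	AMD_UNMASK_BIT(bit)		- inverted mask, roughly ~AMD_BIT(bit)
 *	AMD_ADDBITS(val, fv, field)	- insert fv into bitfield of reg word val
 *	AMD_GETBITS(val, field)		- extract bitfield from reg word val
 */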

static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
				unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;
/* set_rde -- used to control enabling of RX DMA. The problem is
 * that the UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like the
 * one where OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by keeping RX DMA disabled until a
 * request gets queued, because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
	ep0_string,
	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};

/* DMA usage flag */
static int use_dma = 1;
/* packet per buffer dma */
static int use_dma_ppb = 1;
/* with per descr. update */
static int use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static int use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
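
/*
 * Typical usage, assuming the driver is built as a module (the parameter
 * names are the ones declared above; the invocations are illustrative,
 * not taken from any shipped documentation):
 *
 *	modprobe amd5536udc			# DMA, packet per buffer mode
 *	modprobe amd5536udc use_dma=0		# PIO mode, e.g. for g_ether
 *	modprobe amd5536udc use_dma_ppb_du=1	# PPB with descriptor update
 */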

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma) {
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	}
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}
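
/*
 * Layout illustration for cnak_pending, derived from the bit0/bit16
 * comment above rather than from separate documentation: IN endpoints
 * occupy bits 0..15 and OUT endpoints bits 16..31, so e.g. a value of
 * 0x00010001 means both ep0in and ep0out still have a CNAK pending.
 */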


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8 udc_csr_epix;
	unsigned		maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR	*/
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep.maxpacket = (u16) ~0;
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep	*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep	*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}
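
/*
 * Why the dummy points to itself: when a "buffer not available" (BNA)
 * condition hits, the controller can be parked on this self-referencing
 * descriptor instead of walking off the end of a chain into memory the
 * driver no longer owns. This note is an interpretation of the code
 * above, not wording from the hardware documentation.
 */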

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		writel(*(buf + i), ep->txfifo);
	}

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
			ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
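
/*
 * Worked example for the dword/byte split above (assuming
 * UDC_DWORD_BYTES == 4): with maxpacket = 64 and 10 bytes remaining,
 * bytes = 10, so the first loop writes 10 / 4 = 2 dwords and the second
 * writes the 10 % 4 = 2 trailing bytes, shifted out of the last
 * partially filled dword.
 */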

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++) {
		*(buf + i) = readl(dev->rxfifo);
	}
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
	}

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);


		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (req->dma_mapping) {
		if (ep->in)
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_FROMDEVICE);
		req->dma_mapping = 0;
		req->req.dma = DMA_DONT_USE;
	}

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{

	int ret_val = 0;
	struct udc_data_dma	*td;
	struct udc_data_dma	*td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {

		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
	}

	return td;

}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;

}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {

			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}


		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
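
/*
 * Chain-length arithmetic, illustrated (example numbers, not from any
 * spec): for req->req.length = 1536 and ep->ep.maxpacket = 512,
 * len = 1536 / 512 = 3 descriptors with no remainder; for length = 1600
 * the remainder adds one more, so len = 4. The first descriptor is the
 * request's own td_data; only the additional len - 1 come from the pool.
 */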

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
|  | 1366 | tmp = readl(&ep->regs->ctl); | 
|  | 1367 | tmp |= AMD_BIT(UDC_EPCTL_S); | 
|  | 1368 | writel(tmp, &ep->regs->ctl); | 
|  | 1369 | ep->halted = 1; | 
|  | 1370 |  | 
|  | 1371 | /* setup poll timer */ | 
|  | 1372 | if (!timer_pending(&udc_pollstall_timer)) { | 
|  | 1373 | udc_pollstall_timer.expires = jiffies + | 
|  | 1374 | HZ * UDC_POLLSTALL_TIMER_USECONDS | 
|  | 1375 | / (1000 * 1000); | 
|  | 1376 | if (!stop_pollstall_timer) { | 
|  | 1377 | DBG(ep->dev, "start polltimer\n"); | 
|  | 1378 | add_timer(&udc_pollstall_timer); | 
|  | 1379 | } | 
|  | 1380 | } | 
|  | 1381 | } | 
|  | 1382 | } else { | 
|  | 1383 | /* ep is halted by set_halt() before */ | 
|  | 1384 | if (ep->halted) { | 
|  | 1385 | tmp = readl(&ep->regs->ctl); | 
|  | 1386 | /* clear stall bit */ | 
|  | 1387 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | 
|  | 1388 | /* clear NAK by writing CNAK */ | 
|  | 1389 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 1390 | writel(tmp, &ep->regs->ctl); | 
|  | 1391 | ep->halted = 0; | 
|  | 1392 | UDC_QUEUE_CNAK(ep, ep->num); | 
|  | 1393 | } | 
|  | 1394 | } | 
|  | 1395 | spin_unlock_irqrestore(&udc_stall_spinlock, iflags); | 
|  | 1396 | return retval; | 
|  | 1397 | } | 
|  | 1398 |  | 
|  | 1399 | /* gadget interface */ | 
|  | 1400 | static const struct usb_ep_ops udc_ep_ops = { | 
|  | 1401 | .enable		= udc_ep_enable, | 
|  | 1402 | .disable	= udc_ep_disable, | 
|  | 1403 |  | 
|  | 1404 | .alloc_request	= udc_alloc_request, | 
|  | 1405 | .free_request	= udc_free_request, | 
|  | 1406 |  | 
|  | 1407 | .queue		= udc_queue, | 
|  | 1408 | .dequeue	= udc_dequeue, | 
|  | 1409 |  | 
|  | 1410 | .set_halt	= udc_set_halt, | 
|  | 1411 | /* fifo ops not implemented */ | 
|  | 1412 | }; | 
|  | 1413 |  | 
|  | 1414 | /*-------------------------------------------------------------------------*/ | 
|  | 1415 |  | 
|  | 1416 | /* Get frame counter (not implemented) */ | 
|  | 1417 | static int udc_get_frame(struct usb_gadget *gadget) | 
|  | 1418 | { | 
|  | 1419 | return -EOPNOTSUPP; | 
|  | 1420 | } | 
|  | 1421 |  | 
|  | 1422 | /* Remote wakeup gadget interface */ | 
|  | 1423 | static int udc_wakeup(struct usb_gadget *gadget) | 
|  | 1424 | { | 
|  | 1425 | struct udc		*dev; | 
|  | 1426 |  | 
|  | 1427 | if (!gadget) | 
|  | 1428 | return -EINVAL; | 
|  | 1429 | dev = container_of(gadget, struct udc, gadget); | 
|  | 1430 | udc_remote_wakeup(dev); | 
|  | 1431 |  | 
|  | 1432 | return 0; | 
|  | 1433 | } | 
|  | 1434 |  | 
|  | 1435 | /* gadget operations */ | 
|  | 1436 | static const struct usb_gadget_ops udc_ops = { | 
|  | 1437 | .wakeup		= udc_wakeup, | 
|  | 1438 | .get_frame	= udc_get_frame, | 
|  | 1439 | }; | 
|  | 1440 |  | 
|  | 1441 | /* Setups endpoint parameters, adds endpoints to linked list */ | 
|  | 1442 | static void make_ep_lists(struct udc *dev) | 
|  | 1443 | { | 
|  | 1444 | /* make gadget ep lists */ | 
|  | 1445 | INIT_LIST_HEAD(&dev->gadget.ep_list); | 
|  | 1446 | list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list, | 
|  | 1447 | &dev->gadget.ep_list); | 
|  | 1448 | list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list, | 
|  | 1449 | &dev->gadget.ep_list); | 
|  | 1450 | list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list, | 
|  | 1451 | &dev->gadget.ep_list); | 
|  | 1452 |  | 
|  | 1453 | /* fifo config */ | 
|  | 1454 | dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE; | 
|  | 1455 | if (dev->gadget.speed == USB_SPEED_FULL) | 
|  | 1456 | dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE; | 
|  | 1457 | else if (dev->gadget.speed == USB_SPEED_HIGH) | 
|  | 1458 | dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf; | 
|  | 1459 | dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE; | 
|  | 1460 | } | 
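|  |  |  | 
|  |  | /* | 
|  |  | * Minimal sketch (assumption, not part of the driver): gadget drivers | 
|  |  | * walk the ep_list built above when choosing endpoints in their | 
|  |  | * bind() routine, typically like this: | 
|  |  | */ | 
|  |  | static struct usb_ep *example_find_ep(struct usb_gadget *gadget, | 
|  |  | 		const char *name) | 
|  |  | { | 
|  |  | 	struct usb_ep *ep; | 
|  |  |  | 
|  |  | 	list_for_each_entry(ep, &gadget->ep_list, ep_list) { | 
|  |  | 		if (strcmp(ep->name, name) == 0) | 
|  |  | 			return ep; | 
|  |  | 	} | 
|  |  | 	return NULL; | 
|  |  | } | 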
|  | 1461 |  | 
|  | 1462 | /* init registers at driver load time */ | 
|  | 1463 | static int startup_registers(struct udc *dev) | 
|  | 1464 | { | 
|  | 1465 | u32 tmp; | 
|  | 1466 |  | 
|  | 1467 | /* init controller by soft reset */ | 
|  | 1468 | udc_soft_reset(dev); | 
|  | 1469 |  | 
|  | 1470 | /* mask unneeded interrupts */ | 
|  | 1471 | udc_mask_unused_interrupts(dev); | 
|  | 1472 |  | 
|  | 1473 | /* put into initial config */ | 
|  | 1474 | udc_basic_init(dev); | 
|  | 1475 | /* link up all endpoints */ | 
|  | 1476 | udc_setup_endpoints(dev); | 
|  | 1477 |  | 
|  | 1478 | /* program speed */ | 
|  | 1479 | tmp = readl(&dev->regs->cfg); | 
|  | 1480 | if (use_fullspeed) { | 
|  | 1481 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); | 
|  | 1482 | } else { | 
|  | 1483 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD); | 
|  | 1484 | } | 
|  | 1485 | writel(tmp, &dev->regs->cfg); | 
|  | 1486 |  | 
|  | 1487 | return 0; | 
|  | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | /* Inits UDC context */ | 
|  | 1491 | static void udc_basic_init(struct udc *dev) | 
|  | 1492 | { | 
|  | 1493 | u32	tmp; | 
|  | 1494 |  | 
|  | 1495 | DBG(dev, "udc_basic_init()\n"); | 
|  | 1496 |  | 
|  | 1497 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 
|  | 1498 |  | 
|  | 1499 | /* stop RDE timer */ | 
|  | 1500 | if (timer_pending(&udc_timer)) { | 
|  | 1501 | set_rde = 0; | 
|  | 1502 | mod_timer(&udc_timer, jiffies - 1); | 
|  | 1503 | } | 
|  | 1504 | /* stop poll stall timer */ | 
|  | 1505 | if (timer_pending(&udc_pollstall_timer)) { | 
|  | 1506 | mod_timer(&udc_pollstall_timer, jiffies - 1); | 
|  | 1507 | } | 
|  | 1508 | /* disable DMA */ | 
|  | 1509 | tmp = readl(&dev->regs->ctl); | 
|  | 1510 | tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE); | 
|  | 1511 | tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE); | 
|  | 1512 | writel(tmp, &dev->regs->ctl); | 
|  | 1513 |  | 
|  | 1514 | /* enable dynamic CSR programming */ | 
|  | 1515 | tmp = readl(&dev->regs->cfg); | 
|  | 1516 | tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG); | 
|  | 1517 | /* set self powered */ | 
|  | 1518 | tmp |= AMD_BIT(UDC_DEVCFG_SP); | 
|  | 1519 | /* set remote wakeup capable */ | 
|  | 1520 | tmp |= AMD_BIT(UDC_DEVCFG_RWKP); | 
|  | 1521 | writel(tmp, &dev->regs->cfg); | 
|  | 1522 |  | 
|  | 1523 | make_ep_lists(dev); | 
|  | 1524 |  | 
|  | 1525 | dev->data_ep_enabled = 0; | 
|  | 1526 | dev->data_ep_queued = 0; | 
|  | 1527 | } | 
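|  |  |  | 
|  |  | /* | 
|  |  | * The register accesses above all follow the same read-modify-write | 
|  |  | * pattern built on the AMD_BIT()/AMD_UNMASK_BIT() mask macros. A | 
|  |  | * sketch of that pattern factored into helpers (illustrative only; | 
|  |  | * the driver open-codes it at each site): | 
|  |  | */ | 
|  |  | static inline void example_reg_set_bit(u32 __iomem *reg, u32 bitpos) | 
|  |  | { | 
|  |  | 	u32 tmp = readl(reg); | 
|  |  |  | 
|  |  | 	tmp |= AMD_BIT(bitpos);		/* set the single bit at bitpos */ | 
|  |  | 	writel(tmp, reg); | 
|  |  | } | 
|  |  |  | 
|  |  | static inline void example_reg_clear_bit(u32 __iomem *reg, u32 bitpos) | 
|  |  | { | 
|  |  | 	u32 tmp = readl(reg); | 
|  |  |  | 
|  |  | 	tmp &= AMD_UNMASK_BIT(bitpos);	/* clear the single bit */ | 
|  |  | 	writel(tmp, reg); | 
|  |  | } | 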
|  | 1528 |  | 
|  | 1529 | /* Sets initial endpoint parameters */ | 
|  | 1530 | static void udc_setup_endpoints(struct udc *dev) | 
|  | 1531 | { | 
|  | 1532 | struct udc_ep	*ep; | 
|  | 1533 | u32	tmp; | 
|  | 1534 | u32	reg; | 
|  | 1535 |  | 
|  | 1536 | DBG(dev, "udc_setup_endpoints()\n"); | 
|  | 1537 |  | 
|  | 1538 | /* read enum speed */ | 
|  | 1539 | tmp = readl(&dev->regs->sts); | 
|  | 1540 | tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED); | 
|  | 1541 | if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) { | 
|  | 1542 | dev->gadget.speed = USB_SPEED_HIGH; | 
|  | 1543 | } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) { | 
|  | 1544 | dev->gadget.speed = USB_SPEED_FULL; | 
|  | 1545 | } | 
|  | 1546 |  | 
|  | 1547 | /* set basic ep parameters */ | 
|  | 1548 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | 
|  | 1549 | ep = &dev->ep[tmp]; | 
|  | 1550 | ep->dev = dev; | 
|  | 1551 | ep->ep.name = ep_string[tmp]; | 
|  | 1552 | ep->num = tmp; | 
|  | 1553 | /* txfifo size is calculated at enable time */ | 
|  | 1554 | ep->txfifo = dev->txfifo; | 
|  | 1555 |  | 
|  | 1556 | /* fifo size */ | 
|  | 1557 | if (tmp < UDC_EPIN_NUM) { | 
|  | 1558 | ep->fifo_depth = UDC_TXFIFO_SIZE; | 
|  | 1559 | ep->in = 1; | 
|  | 1560 | } else { | 
|  | 1561 | ep->fifo_depth = UDC_RXFIFO_SIZE; | 
|  | 1562 | ep->in = 0; | 
|  | 1563 |  | 
|  | 1564 | } | 
|  | 1565 | ep->regs = &dev->ep_regs[tmp]; | 
|  | 1566 | /* | 
|  | 1567 | * ep will be reset only if it was not enabled before, to avoid | 
|  | 1568 | * disabling ep interrupts when an ENUM interrupt occurs while the | 
|  | 1569 | * ep is not enabled by the gadget driver | 
|  | 1570 | */ | 
|  | 1571 | if (!ep->desc) { | 
|  | 1572 | ep_init(dev->regs, ep); | 
|  | 1573 | } | 
|  | 1574 |  | 
|  | 1575 | if (use_dma) { | 
|  | 1576 | /* | 
|  | 1577 | * ep->dma is not really used, just to indicate that | 
|  | 1578 | * DMA is active (FIXME: remove this) | 
|  | 1579 | * dma regs = dev control regs | 
|  | 1580 | */ | 
|  | 1581 | ep->dma = &dev->regs->ctl; | 
|  | 1582 |  | 
|  | 1583 | /* nak OUT endpoints until enable - not for ep0 */ | 
|  | 1584 | if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX | 
|  | 1585 | && tmp > UDC_EPIN_NUM) { | 
|  | 1586 | /* set NAK */ | 
|  | 1587 | reg = readl(&dev->ep[tmp].regs->ctl); | 
|  | 1588 | reg |= AMD_BIT(UDC_EPCTL_SNAK); | 
|  | 1589 | writel(reg, &dev->ep[tmp].regs->ctl); | 
|  | 1590 | dev->ep[tmp].naking = 1; | 
|  | 1591 |  | 
|  | 1592 | } | 
|  | 1593 | } | 
|  | 1594 | } | 
|  | 1595 | /* EP0 max packet */ | 
|  | 1596 | if (dev->gadget.speed == USB_SPEED_FULL) { | 
|  | 1597 | dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE; | 
|  | 1598 | dev->ep[UDC_EP0OUT_IX].ep.maxpacket = | 
|  | 1599 | UDC_FS_EP0OUT_MAX_PKT_SIZE; | 
|  | 1600 | } else if (dev->gadget.speed == USB_SPEED_HIGH) { | 
|  | 1601 | dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE; | 
|  | 1602 | dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE; | 
|  | 1603 | } | 
|  | 1604 |  | 
|  | 1605 | /* | 
|  | 1606 | * with suspend bug workaround, ep0 params for gadget driver | 
|  | 1607 | * are set at gadget driver bind() call | 
|  | 1608 | */ | 
|  | 1609 | dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; | 
|  | 1610 | dev->ep[UDC_EP0IN_IX].halted = 0; | 
|  | 1611 | INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); | 
|  | 1612 |  | 
|  | 1613 | /* init cfg/alt/int */ | 
|  | 1614 | dev->cur_config = 0; | 
|  | 1615 | dev->cur_intf = 0; | 
|  | 1616 | dev->cur_alt = 0; | 
|  | 1617 | } | 
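|  |  |  | 
|  |  | /* | 
|  |  | * Layout note (derived from the code above): dev->ep[] apparently | 
|  |  | * holds the IN endpoints at indices 0..UDC_EPIN_NUM-1 (ep0-in at | 
|  |  | * UDC_EP0IN_IX) followed by the OUT endpoints (ep0-out at | 
|  |  | * UDC_EP0OUT_IX), which is why ep->in can be derived from a simple | 
|  |  | * index comparison in the loop above. | 
|  |  | */ | 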
|  | 1618 |  | 
|  | 1619 | /* Bring-up after a Connect event: initial setup to be ready for ep0 events */ | 
|  | 1620 | static void usb_connect(struct udc *dev) | 
|  | 1621 | { | 
|  | 1622 |  | 
|  | 1623 | dev_info(&dev->pdev->dev, "USB Connect\n"); | 
|  | 1624 |  | 
|  | 1625 | dev->connected = 1; | 
|  | 1626 |  | 
|  | 1627 | /* put into initial config */ | 
|  | 1628 | udc_basic_init(dev); | 
|  | 1629 |  | 
|  | 1630 | /* enable device setup interrupts */ | 
|  | 1631 | udc_enable_dev_setup_interrupts(dev); | 
|  | 1632 | } | 
|  | 1633 |  | 
|  | 1634 | /* | 
|  | 1635 | * Calls gadget with a disconnect event, resets the UDC and performs | 
|  | 1636 | * the initial bring-up to be ready for ep0 events | 
|  | 1637 | */ | 
|  | 1638 | static void usb_disconnect(struct udc *dev) | 
|  | 1639 | { | 
|  | 1640 |  | 
|  | 1641 | dev_info(&dev->pdev->dev, "USB Disconnect\n"); | 
|  | 1642 |  | 
|  | 1643 | dev->connected = 0; | 
|  | 1644 |  | 
|  | 1645 | /* mask interrupts */ | 
|  | 1646 | udc_mask_unused_interrupts(dev); | 
|  | 1647 |  | 
|  | 1648 | /* REVISIT there doesn't seem to be a point to having this | 
|  | 1649 | * talk to a tasklet ... do it directly, we already hold | 
|  | 1650 | * the spinlock needed to process the disconnect. | 
|  | 1651 | */ | 
|  | 1652 |  | 
|  | 1653 | tasklet_schedule(&disconnect_tasklet); | 
|  | 1654 | } | 
|  | 1655 |  | 
|  | 1656 | /* Tasklet for disconnect to be outside of interrupt context */ | 
|  | 1657 | static void udc_tasklet_disconnect(unsigned long par) | 
|  | 1658 | { | 
|  | 1659 | struct udc *dev = (struct udc *)(*((struct udc **) par)); | 
|  | 1660 | u32 tmp; | 
|  | 1661 |  | 
|  | 1662 | DBG(dev, "Tasklet disconnect\n"); | 
|  | 1663 | spin_lock_irq(&dev->lock); | 
|  | 1664 |  | 
|  | 1665 | if (dev->driver) { | 
|  | 1666 | spin_unlock(&dev->lock); | 
|  | 1667 | dev->driver->disconnect(&dev->gadget); | 
|  | 1668 | spin_lock(&dev->lock); | 
|  | 1669 |  | 
|  | 1670 | /* empty queues */ | 
|  | 1671 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | 
|  | 1672 | empty_req_queue(&dev->ep[tmp]); | 
|  | 1673 | } | 
|  | 1674 |  | 
|  | 1675 | } | 
|  | 1676 |  | 
|  | 1677 | /* disable ep0 */ | 
|  | 1678 | ep_init(dev->regs, | 
|  | 1679 | &dev->ep[UDC_EP0IN_IX]); | 
|  | 1680 |  | 
|  | 1681 |  | 
|  | 1682 | if (!soft_reset_occured) { | 
|  | 1683 | /* init controller by soft reset */ | 
|  | 1684 | udc_soft_reset(dev); | 
|  | 1685 | soft_reset_occured++; | 
|  | 1686 | } | 
|  | 1687 |  | 
|  | 1688 | /* re-enable dev interrupts */ | 
|  | 1689 | udc_enable_dev_setup_interrupts(dev); | 
|  | 1690 | /* back to full speed ? */ | 
|  | 1691 | if (use_fullspeed) { | 
|  | 1692 | tmp = readl(&dev->regs->cfg); | 
|  | 1693 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); | 
|  | 1694 | writel(tmp, &dev->regs->cfg); | 
|  | 1695 | } | 
|  | 1696 |  | 
|  | 1697 | spin_unlock_irq(&dev->lock); | 
|  | 1698 | } | 
|  | 1699 |  | 
|  | 1700 | /* Reset the UDC core */ | 
|  | 1701 | static void udc_soft_reset(struct udc *dev) | 
|  | 1702 | { | 
|  | 1703 | unsigned long	flags; | 
|  | 1704 |  | 
|  | 1705 | DBG(dev, "Soft reset\n"); | 
|  | 1706 | /* | 
|  | 1707 | * clear possibly pending interrupts, because interrupt | 
|  | 1708 | * status is lost after soft reset; | 
|  | 1709 | * reset ep interrupt status | 
|  | 1710 | */ | 
|  | 1711 | writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts); | 
|  | 1712 | /* device int. status reset */ | 
|  | 1713 | writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts); | 
|  | 1714 |  | 
|  | 1715 | spin_lock_irqsave(&udc_irq_spinlock, flags); | 
|  | 1716 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); | 
|  | 1717 | readl(&dev->regs->cfg); | 
|  | 1718 | spin_unlock_irqrestore(&udc_irq_spinlock, flags); | 
|  | 1719 |  | 
|  | 1720 | } | 
|  | 1721 |  | 
|  | 1722 | /* RDE timer callback to set RDE bit */ | 
|  | 1723 | static void udc_timer_function(unsigned long v) | 
|  | 1724 | { | 
|  | 1725 | u32 tmp; | 
|  | 1726 |  | 
|  | 1727 | spin_lock_irq(&udc_irq_spinlock); | 
|  | 1728 |  | 
|  | 1729 | if (set_rde > 0) { | 
|  | 1730 | /* | 
|  | 1731 | * conditionally open the fifo if it was filled | 
|  | 1732 | * on the last timer call | 
|  | 1733 | */ | 
|  | 1734 | if (set_rde > 1) { | 
|  | 1735 | /* set RDE to receive setup data */ | 
|  | 1736 | tmp = readl(&udc->regs->ctl); | 
|  | 1737 | tmp |= AMD_BIT(UDC_DEVCTL_RDE); | 
|  | 1738 | writel(tmp, &udc->regs->ctl); | 
|  | 1739 | set_rde = -1; | 
|  | 1740 | } else if (readl(&udc->regs->sts) | 
|  | 1741 | & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | 
|  | 1742 | /* | 
|  | 1743 | * if the fifo is empty, set up polling; do not | 
|  | 1744 | * just open the fifo | 
|  | 1745 | */ | 
|  | 1746 | udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV; | 
|  | 1747 | if (!stop_timer) { | 
|  | 1748 | add_timer(&udc_timer); | 
|  | 1749 | } | 
|  | 1750 | } else { | 
|  | 1751 | /* | 
|  | 1752 | * fifo contains data now: set up the timer to open | 
|  | 1753 | * the fifo when it expires, so that setup packets can | 
|  | 1754 | * still be received; when data packets get queued by | 
|  | 1755 | * the gadget layer, the timer is forced to expire with | 
|  | 1756 | * set_rde=0 (RDE is set in udc_queue()) | 
|  | 1757 | */ | 
|  | 1758 | set_rde++; | 
|  | 1760 | udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS; | 
|  | 1761 | if (!stop_timer) { | 
|  | 1762 | add_timer(&udc_timer); | 
|  | 1763 | } | 
|  | 1764 | } | 
|  | 1765 |  | 
|  | 1766 | } else | 
|  | 1767 | set_rde = -1; /* RDE was set by udc_queue() */ | 
|  | 1768 | spin_unlock_irq(&udc_irq_spinlock); | 
|  | 1769 | if (stop_timer) | 
|  | 1770 | complete(&on_exit); | 
|  | 1771 |  | 
|  | 1772 | } | 
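|  |  |  | 
|  |  | /* | 
|  |  | * Descriptive note on the set_rde state variable used above (as far | 
|  |  | * as can be read from this file): | 
|  |  | *   set_rde < 0: RDE already set, timer idle | 
|  |  | *   set_rde = 0: timer stopped, RDE handled by udc_queue() | 
|  |  | *   set_rde = 1: polling while the rxfifo is still empty | 
|  |  | *   set_rde > 1: fifo was filled, open it on the next expiry | 
|  |  | */ | 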
|  | 1773 |  | 
|  | 1774 | /* Handle halt state, used in stall poll timer */ | 
|  | 1775 | static void udc_handle_halt_state(struct udc_ep *ep) | 
|  | 1776 | { | 
|  | 1777 | u32 tmp; | 
|  | 1778 | /* handle stall state only while the ep is halted */ | 
|  | 1779 | if (ep->halted == 1) { | 
|  | 1780 | tmp = readl(&ep->regs->ctl); | 
|  | 1781 | /* STALL cleared ? */ | 
|  | 1782 | if (!(tmp & AMD_BIT(UDC_EPCTL_S))) { | 
|  | 1783 | /* | 
|  | 1784 | * FIXME: MSC spec requires that stall remains | 
|  | 1785 | * even on receiving of CLEAR_FEATURE HALT. So | 
|  | 1786 | * we would set STALL again here to be compliant. | 
|  | 1787 | * But with current mass storage drivers this does | 
|  | 1788 | * not work (would produce endless host retries). | 
|  | 1789 | * So we clear halt on CLEAR_FEATURE. | 
|  | 1790 | * | 
|  | 1791 | DBG(ep->dev, "ep %d: set STALL again\n", ep->num); | 
|  | 1792 | tmp |= AMD_BIT(UDC_EPCTL_S); | 
|  | 1793 | writel(tmp, &ep->regs->ctl);*/ | 
|  | 1794 |  | 
|  | 1795 | /* clear NAK by writing CNAK */ | 
|  | 1796 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 1797 | writel(tmp, &ep->regs->ctl); | 
|  | 1798 | ep->halted = 0; | 
|  | 1799 | UDC_QUEUE_CNAK(ep, ep->num); | 
|  | 1800 | } | 
|  | 1801 | } | 
|  | 1802 | } | 
|  | 1803 |  | 
|  | 1804 | /* Stall timer callback to poll the S bit and set it again afterwards */ | 
|  | 1805 | static void udc_pollstall_timer_function(unsigned long v) | 
|  | 1806 | { | 
|  | 1807 | struct udc_ep *ep; | 
|  | 1808 | int halted = 0; | 
|  | 1809 |  | 
|  | 1810 | spin_lock_irq(&udc_stall_spinlock); | 
|  | 1811 | /* | 
|  | 1812 | * only one IN and one OUT endpoint are handled | 
|  | 1813 | * IN poll stall | 
|  | 1814 | */ | 
|  | 1815 | ep = &udc->ep[UDC_EPIN_IX]; | 
|  | 1816 | udc_handle_halt_state(ep); | 
|  | 1817 | if (ep->halted) | 
|  | 1818 | halted = 1; | 
|  | 1819 | /* OUT poll stall */ | 
|  | 1820 | ep = &udc->ep[UDC_EPOUT_IX]; | 
|  | 1821 | udc_handle_halt_state(ep); | 
|  | 1822 | if (ep->halted) | 
|  | 1823 | halted = 1; | 
|  | 1824 |  | 
|  | 1825 | /* setup timer again when still halted */ | 
|  | 1826 | if (!stop_pollstall_timer && halted) { | 
|  | 1827 | udc_pollstall_timer.expires = jiffies + | 
|  | 1828 | HZ * UDC_POLLSTALL_TIMER_USECONDS | 
|  | 1829 | / (1000 * 1000); | 
|  | 1830 | add_timer(&udc_pollstall_timer); | 
|  | 1831 | } | 
|  | 1832 | spin_unlock_irq(&udc_stall_spinlock); | 
|  | 1833 |  | 
|  | 1834 | if (stop_pollstall_timer) | 
|  | 1835 | complete(&on_pollstall_exit); | 
|  | 1836 | } | 
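|  |  |  | 
|  |  | /* | 
|  |  | * Side note on the interval computed above: HZ * | 
|  |  | * UDC_POLLSTALL_TIMER_USECONDS / (1000 * 1000) converts a | 
|  |  | * microsecond constant to jiffies by hand; this should be roughly | 
|  |  | * equivalent to usecs_to_jiffies(UDC_POLLSTALL_TIMER_USECONDS), | 
|  |  | * modulo rounding. | 
|  |  | */ | 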
|  | 1837 |  | 
|  | 1838 | /* Inits endpoint 0 so that SETUP packets are processed */ | 
|  | 1839 | static void activate_control_endpoints(struct udc *dev) | 
|  | 1840 | { | 
|  | 1841 | u32 tmp; | 
|  | 1842 |  | 
|  | 1843 | DBG(dev, "activate_control_endpoints\n"); | 
|  | 1844 |  | 
|  | 1845 | /* flush fifo */ | 
|  | 1846 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 1847 | tmp |= AMD_BIT(UDC_EPCTL_F); | 
|  | 1848 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 1849 |  | 
|  | 1850 | /* set ep0 directions */ | 
|  | 1851 | dev->ep[UDC_EP0IN_IX].in = 1; | 
|  | 1852 | dev->ep[UDC_EP0OUT_IX].in = 0; | 
|  | 1853 |  | 
|  | 1854 | /* set buffer size (tx fifo entries) of EP0_IN */ | 
|  | 1855 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); | 
|  | 1856 | if (dev->gadget.speed == USB_SPEED_FULL) | 
|  | 1857 | tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE, | 
|  | 1858 | UDC_EPIN_BUFF_SIZE); | 
|  | 1859 | else if (dev->gadget.speed == USB_SPEED_HIGH) | 
|  | 1860 | tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE, | 
|  | 1861 | UDC_EPIN_BUFF_SIZE); | 
|  | 1862 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); | 
|  | 1863 |  | 
|  | 1864 | /* set max packet size of EP0_IN */ | 
|  | 1865 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); | 
|  | 1866 | if (dev->gadget.speed == USB_SPEED_FULL) | 
|  | 1867 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE, | 
|  | 1868 | UDC_EP_MAX_PKT_SIZE); | 
|  | 1869 | else if (dev->gadget.speed == USB_SPEED_HIGH) | 
|  | 1870 | tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE, | 
|  | 1871 | UDC_EP_MAX_PKT_SIZE); | 
|  | 1872 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); | 
|  | 1873 |  | 
|  | 1874 | /* set max packet size of EP0_OUT */ | 
|  | 1875 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); | 
|  | 1876 | if (dev->gadget.speed == USB_SPEED_FULL) | 
|  | 1877 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE, | 
|  | 1878 | UDC_EP_MAX_PKT_SIZE); | 
|  | 1879 | else if (dev->gadget.speed == USB_SPEED_HIGH) | 
|  | 1880 | tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE, | 
|  | 1881 | UDC_EP_MAX_PKT_SIZE); | 
|  | 1882 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); | 
|  | 1883 |  | 
|  | 1884 | /* set max packet size of EP0 in UDC CSR */ | 
|  | 1885 | tmp = readl(&dev->csr->ne[0]); | 
|  | 1886 | if (dev->gadget.speed == USB_SPEED_FULL) | 
|  | 1887 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE, | 
|  | 1888 | UDC_CSR_NE_MAX_PKT); | 
|  | 1889 | else if (dev->gadget.speed == USB_SPEED_HIGH) | 
|  | 1890 | tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE, | 
|  | 1891 | UDC_CSR_NE_MAX_PKT); | 
|  | 1892 | writel(tmp, &dev->csr->ne[0]); | 
|  | 1893 |  | 
|  | 1894 | if (use_dma) { | 
|  | 1895 | dev->ep[UDC_EP0OUT_IX].td->status |= | 
|  | 1896 | AMD_BIT(UDC_DMA_OUT_STS_L); | 
|  | 1897 | /* write dma desc address */ | 
|  | 1898 | writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma, | 
|  | 1899 | &dev->ep[UDC_EP0OUT_IX].regs->subptr); | 
|  | 1900 | writel(dev->ep[UDC_EP0OUT_IX].td_phys, | 
|  | 1901 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | 
|  | 1902 | /* stop RDE timer */ | 
|  | 1903 | if (timer_pending(&udc_timer)) { | 
|  | 1904 | set_rde = 0; | 
|  | 1905 | mod_timer(&udc_timer, jiffies - 1); | 
|  | 1906 | } | 
|  | 1907 | /* stop pollstall timer */ | 
|  | 1908 | if (timer_pending(&udc_pollstall_timer)) { | 
|  | 1909 | mod_timer(&udc_pollstall_timer, jiffies - 1); | 
|  | 1910 | } | 
|  | 1911 | /* enable DMA */ | 
|  | 1912 | tmp = readl(&dev->regs->ctl); | 
|  | 1913 | tmp |= AMD_BIT(UDC_DEVCTL_MODE) | 
|  | 1914 | | AMD_BIT(UDC_DEVCTL_RDE) | 
|  | 1915 | | AMD_BIT(UDC_DEVCTL_TDE); | 
|  | 1916 | if (use_dma_bufferfill_mode) { | 
|  | 1917 | tmp |= AMD_BIT(UDC_DEVCTL_BF); | 
|  | 1918 | } else if (use_dma_ppb_du) { | 
|  | 1919 | tmp |= AMD_BIT(UDC_DEVCTL_DU); | 
|  | 1920 | } | 
|  | 1921 | writel(tmp, &dev->regs->ctl); | 
|  | 1922 | } | 
|  | 1923 |  | 
|  | 1924 | /* clear NAK by writing CNAK for EP0IN */ | 
|  | 1925 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 1926 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 1927 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 1928 | dev->ep[UDC_EP0IN_IX].naking = 0; | 
|  | 1929 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); | 
|  | 1930 |  | 
|  | 1931 | /* clear NAK by writing CNAK for EP0OUT */ | 
|  | 1932 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | 
|  | 1933 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 1934 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | 
|  | 1935 | dev->ep[UDC_EP0OUT_IX].naking = 0; | 
|  | 1936 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); | 
|  | 1937 | } | 
|  | 1938 |  | 
|  | 1939 | /* Make endpoint 0 ready for control traffic */ | 
|  | 1940 | static int setup_ep0(struct udc *dev) | 
|  | 1941 | { | 
|  | 1942 | activate_control_endpoints(dev); | 
|  | 1943 | /* enable ep0 interrupts */ | 
|  | 1944 | udc_enable_ep0_interrupts(dev); | 
|  | 1945 | /* enable device setup interrupts */ | 
|  | 1946 | udc_enable_dev_setup_interrupts(dev); | 
|  | 1947 |  | 
|  | 1948 | return 0; | 
|  | 1949 | } | 
|  | 1950 |  | 
|  | 1951 | /* Called by gadget driver to register itself */ | 
|  | 1952 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 
|  | 1953 | { | 
|  | 1954 | struct udc		*dev = udc; | 
|  | 1955 | int			retval; | 
|  | 1956 | u32 tmp; | 
|  | 1957 |  | 
|  | 1958 | if (!driver || !driver->bind || !driver->setup | 
|  | 1959 | || driver->speed != USB_SPEED_HIGH) | 
|  | 1960 | return -EINVAL; | 
|  | 1961 | if (!dev) | 
|  | 1962 | return -ENODEV; | 
|  | 1963 | if (dev->driver) | 
|  | 1964 | return -EBUSY; | 
|  | 1965 |  | 
|  | 1966 | driver->driver.bus = NULL; | 
|  | 1967 | dev->driver = driver; | 
|  | 1968 | dev->gadget.dev.driver = &driver->driver; | 
|  | 1969 |  | 
|  | 1970 | retval = driver->bind(&dev->gadget); | 
|  | 1971 |  | 
|  | 1972 | /* Some gadget drivers use both ep0 directions. | 
|  | 1973 | * NOTE: to gadget driver, ep0 is just one endpoint... | 
|  | 1974 | */ | 
|  | 1975 | dev->ep[UDC_EP0OUT_IX].ep.driver_data = | 
|  | 1976 | dev->ep[UDC_EP0IN_IX].ep.driver_data; | 
|  | 1977 |  | 
|  | 1978 | if (retval) { | 
|  | 1979 | DBG(dev, "binding to %s returning %d\n", | 
|  | 1980 | driver->driver.name, retval); | 
|  | 1981 | dev->driver = NULL; | 
|  | 1982 | dev->gadget.dev.driver = NULL; | 
|  | 1983 | return retval; | 
|  | 1984 | } | 
|  | 1985 |  | 
|  | 1986 | /* get ready for ep0 traffic */ | 
|  | 1987 | setup_ep0(dev); | 
|  | 1988 |  | 
|  | 1989 | /* clear SD */ | 
|  | 1990 | tmp = readl(&dev->regs->ctl); | 
|  | 1991 | tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD); | 
|  | 1992 | writel(tmp, &dev->regs->ctl); | 
|  | 1993 |  | 
|  | 1994 | usb_connect(dev); | 
|  | 1995 |  | 
|  | 1996 | return 0; | 
|  | 1997 | } | 
|  | 1998 | EXPORT_SYMBOL(usb_gadget_register_driver); | 
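|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative use from a gadget driver's module init (hypothetical | 
|  |  | * names; real gadget drivers such as g_ether follow this pattern): | 
|  |  | */ | 
|  |  | static int  example_bind(struct usb_gadget *gadget); | 
|  |  | static void example_unbind(struct usb_gadget *gadget); | 
|  |  | static int  example_setup(struct usb_gadget *gadget, | 
|  |  | 		const struct usb_ctrlrequest *ctrl); | 
|  |  | static void example_disconnect(struct usb_gadget *gadget); | 
|  |  |  | 
|  |  | static struct usb_gadget_driver example_driver = { | 
|  |  | 	.speed		= USB_SPEED_HIGH,	/* required by the check above */ | 
|  |  | 	.bind		= example_bind, | 
|  |  | 	.unbind		= example_unbind, | 
|  |  | 	.setup		= example_setup, | 
|  |  | 	.disconnect	= example_disconnect, | 
|  |  | 	.driver	= { | 
|  |  | 		.name	= "example_gadget", | 
|  |  | 	}, | 
|  |  | }; | 
|  |  |  | 
|  |  | static int __init example_mod_init(void) | 
|  |  | { | 
|  |  | 	return usb_gadget_register_driver(&example_driver); | 
|  |  | } | 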
|  | 1999 |  | 
|  | 2000 | /* shutdown requests and disconnect from gadget */ | 
|  | 2001 | static void | 
|  | 2002 | shutdown(struct udc *dev, struct usb_gadget_driver *driver) | 
|  | 2003 | __releases(dev->lock) | 
|  | 2004 | __acquires(dev->lock) | 
|  | 2005 | { | 
|  | 2006 | int tmp; | 
|  | 2007 |  | 
|  | 2008 | /* empty queues and init hardware */ | 
|  | 2009 | udc_basic_init(dev); | 
|  | 2010 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | 
|  | 2011 | empty_req_queue(&dev->ep[tmp]); | 
|  | 2012 | } | 
|  | 2013 |  | 
|  | 2014 | if (dev->gadget.speed != USB_SPEED_UNKNOWN) { | 
|  | 2015 | spin_unlock(&dev->lock); | 
|  | 2016 | driver->disconnect(&dev->gadget); | 
|  | 2017 | spin_lock(&dev->lock); | 
|  | 2018 | } | 
|  | 2019 | /* init */ | 
|  | 2020 | udc_setup_endpoints(dev); | 
|  | 2021 | } | 
|  | 2022 |  | 
|  | 2023 | /* Called by gadget driver to unregister itself */ | 
|  | 2024 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 
|  | 2025 | { | 
|  | 2026 | struct udc	*dev = udc; | 
|  | 2027 | unsigned long	flags; | 
|  | 2028 | u32 tmp; | 
|  | 2029 |  | 
|  | 2030 | if (!dev) | 
|  | 2031 | return -ENODEV; | 
|  | 2032 | if (!driver || driver != dev->driver || !driver->unbind) | 
|  | 2033 | return -EINVAL; | 
|  | 2034 |  | 
|  | 2035 | spin_lock_irqsave(&dev->lock, flags); | 
|  | 2036 | udc_mask_unused_interrupts(dev); | 
|  | 2037 | shutdown(dev, driver); | 
|  | 2038 | spin_unlock_irqrestore(&dev->lock, flags); | 
|  | 2039 |  | 
|  | 2040 | driver->unbind(&dev->gadget); | 
| Patrik Sevallius | eb0be47 | 2007-11-20 09:32:00 -0800 | [diff] [blame] | 2041 | dev->gadget.dev.driver = NULL; | 
| Thomas Dahlmann | 55d402d | 2007-07-16 21:40:54 -0700 | [diff] [blame] | 2042 | dev->driver = NULL; | 
|  | 2043 |  | 
|  | 2044 | /* set SD */ | 
|  | 2045 | tmp = readl(&dev->regs->ctl); | 
|  | 2046 | tmp |= AMD_BIT(UDC_DEVCTL_SD); | 
|  | 2047 | writel(tmp, &dev->regs->ctl); | 
|  | 2048 |  | 
|  | 2049 |  | 
|  | 2050 | DBG(dev, "%s: unregistered\n", driver->driver.name); | 
|  | 2051 |  | 
|  | 2052 | return 0; | 
|  | 2053 | } | 
|  | 2054 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | 
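|  |  |  | 
|  |  | /* | 
|  |  | * Matching module exit for the hypothetical registration sketch | 
|  |  | * after usb_gadget_register_driver() above: | 
|  |  | */ | 
|  |  | static void __exit example_mod_exit(void) | 
|  |  | { | 
|  |  | 	usb_gadget_unregister_driver(&example_driver); | 
|  |  | } | 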
|  | 2055 |  | 
|  | 2056 |  | 
|  | 2057 | /* Clear pending NAK bits */ | 
|  | 2058 | static void udc_process_cnak_queue(struct udc *dev) | 
|  | 2059 | { | 
|  | 2060 | u32 tmp; | 
|  | 2061 | u32 reg; | 
|  | 2062 |  | 
|  | 2063 | /* check IN endpoints */ | 
|  | 2064 | DBG(dev, "CNAK pending queue processing\n"); | 
|  | 2065 | for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) { | 
|  | 2066 | if (cnak_pending & (1 << tmp)) { | 
|  | 2067 | DBG(dev, "CNAK pending for ep%d\n", tmp); | 
|  | 2068 | /* clear NAK by writing CNAK */ | 
|  | 2069 | reg = readl(&dev->ep[tmp].regs->ctl); | 
|  | 2070 | reg |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 2071 | writel(reg, &dev->ep[tmp].regs->ctl); | 
|  | 2072 | dev->ep[tmp].naking = 0; | 
|  | 2073 | UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num); | 
|  | 2074 | } | 
|  | 2075 | } | 
|  | 2076 | /* ...	and ep0out */ | 
|  | 2077 | if (cnak_pending & (1 << UDC_EP0OUT_IX)) { | 
|  | 2078 | DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX); | 
|  | 2079 | /* clear NAK by writing CNAK */ | 
|  | 2080 | reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | 
|  | 2081 | reg |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 2082 | writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | 
|  | 2083 | dev->ep[UDC_EP0OUT_IX].naking = 0; | 
|  | 2084 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], | 
|  | 2085 | dev->ep[UDC_EP0OUT_IX].num); | 
|  | 2086 | } | 
|  | 2087 | } | 
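|  |  |  | 
|  |  | /* | 
|  |  | * Descriptive note: cnak_pending appears to be a bitmask with one | 
|  |  | * bit per endpoint index, set (via the UDC_QUEUE_CNAK() macro) when | 
|  |  | * a CNAK write could not take effect immediately; the ISRs below | 
|  |  | * re-check it and call udc_process_cnak_queue() once | 
|  |  | * UDC_DEVSTS_RXFIFO_EMPTY is seen. | 
|  |  | */ | 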
|  | 2088 |  | 
|  | 2089 | /* Enable RX DMA after a setup packet */ | 
|  | 2090 | static void udc_ep0_set_rde(struct udc *dev) | 
|  | 2091 | { | 
|  | 2092 | if (use_dma) { | 
|  | 2093 | /* | 
|  | 2094 | * only enable RX DMA when no data endpoint is enabled | 
|  | 2095 | * or data is queued | 
|  | 2096 | */ | 
|  | 2097 | if (!dev->data_ep_enabled || dev->data_ep_queued) { | 
|  | 2098 | udc_set_rde(dev); | 
|  | 2099 | } else { | 
|  | 2100 | /* | 
|  | 2101 | * set up timer for enabling RDE (so as not to enable | 
|  | 2102 | * RXFIFO DMA for data endpoints too early) | 
|  | 2103 | */ | 
|  | 2104 | if (set_rde != 0 && !timer_pending(&udc_timer)) { | 
|  | 2105 | udc_timer.expires = | 
|  | 2106 | jiffies + HZ/UDC_RDE_TIMER_DIV; | 
|  | 2107 | set_rde = 1; | 
|  | 2108 | if (!stop_timer) { | 
|  | 2109 | add_timer(&udc_timer); | 
|  | 2110 | } | 
|  | 2111 | } | 
|  | 2112 | } | 
|  | 2113 | } | 
|  | 2114 | } | 
|  | 2115 |  | 
|  | 2116 |  | 
|  | 2117 | /* Interrupt handler for data OUT traffic */ | 
|  | 2118 | static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix) | 
|  | 2119 | { | 
|  | 2120 | irqreturn_t		ret_val = IRQ_NONE; | 
|  | 2121 | u32			tmp; | 
|  | 2122 | struct udc_ep		*ep; | 
|  | 2123 | struct udc_request	*req; | 
|  | 2124 | unsigned int		count; | 
|  | 2125 | struct udc_data_dma	*td = NULL; | 
|  | 2126 | unsigned		dma_done; | 
|  | 2127 |  | 
|  | 2128 | VDBG(dev, "ep%d irq\n", ep_ix); | 
|  | 2129 | ep = &dev->ep[ep_ix]; | 
|  | 2130 |  | 
|  | 2131 | tmp = readl(&ep->regs->sts); | 
|  | 2132 | if (use_dma) { | 
|  | 2133 | /* BNA event ? */ | 
|  | 2134 | if (tmp & AMD_BIT(UDC_EPSTS_BNA)) { | 
|  | 2135 | DBG(dev, "BNA ep%dout occured - DESPTR = %x \n", | 
|  | 2136 | ep->num, readl(&ep->regs->desptr)); | 
|  | 2137 | /* clear BNA */ | 
|  | 2138 | writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts); | 
|  | 2139 | if (!ep->cancel_transfer) | 
|  | 2140 | ep->bna_occurred = 1; | 
|  | 2141 | else | 
|  | 2142 | ep->cancel_transfer = 0; | 
|  | 2143 | ret_val = IRQ_HANDLED; | 
|  | 2144 | goto finished; | 
|  | 2145 | } | 
|  | 2146 | } | 
|  | 2147 | /* HE event ? */ | 
|  | 2148 | if (tmp & AMD_BIT(UDC_EPSTS_HE)) { | 
|  | 2149 | dev_err(&dev->pdev->dev, "HE ep%dout occured\n", ep->num); | 
|  | 2150 |  | 
|  | 2151 | /* clear HE */ | 
|  | 2152 | writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); | 
|  | 2153 | ret_val = IRQ_HANDLED; | 
|  | 2154 | goto finished; | 
|  | 2155 | } | 
|  | 2156 |  | 
|  | 2157 | if (!list_empty(&ep->queue)) { | 
|  | 2158 |  | 
|  | 2159 | /* next request */ | 
|  | 2160 | req = list_entry(ep->queue.next, | 
|  | 2161 | struct udc_request, queue); | 
|  | 2162 | } else { | 
|  | 2163 | req = NULL; | 
|  | 2164 | udc_rxfifo_pending = 1; | 
|  | 2165 | } | 
|  | 2166 | VDBG(dev, "req = %p\n", req); | 
|  | 2167 | /* fifo mode */ | 
|  | 2168 | if (!use_dma) { | 
|  | 2169 |  | 
|  | 2170 | /* read fifo */ | 
|  | 2171 | if (req && udc_rxfifo_read(ep, req)) { | 
|  | 2172 | ret_val = IRQ_HANDLED; | 
|  | 2173 |  | 
|  | 2174 | /* finish */ | 
|  | 2175 | complete_req(ep, req, 0); | 
|  | 2176 | /* next request */ | 
|  | 2177 | if (!list_empty(&ep->queue) && !ep->halted) { | 
|  | 2178 | req = list_entry(ep->queue.next, | 
|  | 2179 | struct udc_request, queue); | 
|  | 2180 | } else | 
|  | 2181 | req = NULL; | 
|  | 2182 | } | 
|  | 2183 |  | 
|  | 2184 | /* DMA */ | 
|  | 2185 | } else if (!ep->cancel_transfer && req != NULL) { | 
|  | 2186 | ret_val = IRQ_HANDLED; | 
|  | 2187 |  | 
|  | 2188 | /* check for DMA done */ | 
|  | 2189 | if (!use_dma_ppb) { | 
|  | 2190 | dma_done = AMD_GETBITS(req->td_data->status, | 
|  | 2191 | UDC_DMA_OUT_STS_BS); | 
|  | 2192 | /* packet per buffer mode - rx bytes */ | 
|  | 2193 | } else { | 
|  | 2194 | /* | 
|  | 2195 | * if BNA occurred then recover desc. from | 
|  | 2196 | * BNA dummy desc. | 
|  | 2197 | */ | 
|  | 2198 | if (ep->bna_occurred) { | 
|  | 2199 | VDBG(dev, "Recover desc. from BNA dummy\n"); | 
|  | 2200 | memcpy(req->td_data, ep->bna_dummy_req->td_data, | 
|  | 2201 | sizeof(struct udc_data_dma)); | 
|  | 2202 | ep->bna_occurred = 0; | 
|  | 2203 | udc_init_bna_dummy(ep->req); | 
|  | 2204 | } | 
|  | 2205 | td = udc_get_last_dma_desc(req); | 
|  | 2206 | dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS); | 
|  | 2207 | } | 
|  | 2208 | if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) { | 
|  | 2209 | /* buffer fill mode - rx bytes */ | 
|  | 2210 | if (!use_dma_ppb) { | 
|  | 2211 | /* number of received bytes */ | 
|  | 2212 | count = AMD_GETBITS(req->td_data->status, | 
|  | 2213 | UDC_DMA_OUT_STS_RXBYTES); | 
|  | 2214 | VDBG(dev, "rx bytes=%u\n", count); | 
|  | 2215 | /* packet per buffer mode - rx bytes */ | 
|  | 2216 | } else { | 
|  | 2217 | VDBG(dev, "req->td_data=%p\n", req->td_data); | 
|  | 2218 | VDBG(dev, "last desc = %p\n", td); | 
|  | 2219 | /* number of received bytes */ | 
|  | 2220 | if (use_dma_ppb_du) { | 
|  | 2221 | /* every desc. counts bytes */ | 
|  | 2222 | count = udc_get_ppbdu_rxbytes(req); | 
|  | 2223 | } else { | 
|  | 2224 | /* last desc. counts bytes */ | 
|  | 2225 | count = AMD_GETBITS(td->status, | 
|  | 2226 | UDC_DMA_OUT_STS_RXBYTES); | 
|  | 2227 | if (!count && req->req.length | 
|  | 2228 | == UDC_DMA_MAXPACKET) { | 
|  | 2229 | /* | 
|  | 2230 | * on 64k packets the RXBYTES | 
|  | 2231 | * field is zero | 
|  | 2232 | */ | 
|  | 2233 | count = UDC_DMA_MAXPACKET; | 
|  | 2234 | } | 
|  | 2235 | } | 
|  | 2236 | VDBG(dev, "last desc rx bytes=%u\n", count); | 
|  | 2237 | } | 
|  | 2238 |  | 
|  | 2239 | tmp = req->req.length - req->req.actual; | 
|  | 2240 | if (count > tmp) { | 
|  | 2241 | if ((tmp % ep->ep.maxpacket) != 0) { | 
|  | 2242 | DBG(dev, "%s: rx %db, space=%db\n", | 
|  | 2243 | ep->ep.name, count, tmp); | 
|  | 2244 | req->req.status = -EOVERFLOW; | 
|  | 2245 | } | 
|  | 2246 | count = tmp; | 
|  | 2247 | } | 
|  | 2248 | req->req.actual += count; | 
|  | 2249 | req->dma_going = 0; | 
|  | 2250 | /* complete request */ | 
|  | 2251 | complete_req(ep, req, 0); | 
|  | 2252 |  | 
|  | 2253 | /* next request */ | 
|  | 2254 | if (!list_empty(&ep->queue) && !ep->halted) { | 
|  | 2255 | req = list_entry(ep->queue.next, | 
|  | 2256 | struct udc_request, | 
|  | 2257 | queue); | 
|  | 2258 | /* | 
|  | 2259 | * DMA may already be started by udc_queue(), | 
|  | 2260 | * called from the gadget driver's completion | 
|  | 2261 | * routine. This happens when the queue | 
|  | 2262 | * holds only one request. | 
|  | 2263 | */ | 
|  | 2264 | if (req->dma_going == 0) { | 
|  | 2265 | /* next dma */ | 
|  | 2266 | if (prep_dma(ep, req, GFP_ATOMIC) != 0) | 
|  | 2267 | goto finished; | 
|  | 2268 | /* write desc pointer */ | 
|  | 2269 | writel(req->td_phys, | 
|  | 2270 | &ep->regs->desptr); | 
|  | 2271 | req->dma_going = 1; | 
|  | 2272 | /* enable DMA */ | 
|  | 2273 | udc_set_rde(dev); | 
|  | 2274 | } | 
|  | 2275 | } else { | 
|  | 2276 | /* | 
|  | 2277 | * implant BNA dummy descriptor to allow | 
|  | 2278 | * RXFIFO opening by RDE | 
|  | 2279 | */ | 
|  | 2280 | if (ep->bna_dummy_req) { | 
|  | 2281 | /* write desc pointer */ | 
|  | 2282 | writel(ep->bna_dummy_req->td_phys, | 
|  | 2283 | &ep->regs->desptr); | 
|  | 2284 | ep->bna_occurred = 0; | 
|  | 2285 | } | 
|  | 2286 |  | 
|  | 2287 | /* | 
|  | 2288 | * schedule timer for setting RDE if the queue | 
|  | 2289 | * remains empty, to allow ep0 packets to pass | 
|  | 2290 | * through | 
|  | 2291 | */ | 
|  | 2292 | if (set_rde != 0 | 
|  | 2293 | && !timer_pending(&udc_timer)) { | 
|  | 2294 | udc_timer.expires = | 
|  | 2295 | jiffies | 
|  | 2296 | + HZ*UDC_RDE_TIMER_SECONDS; | 
|  | 2297 | set_rde = 1; | 
|  | 2298 | if (!stop_timer) { | 
|  | 2299 | add_timer(&udc_timer); | 
|  | 2300 | } | 
|  | 2301 | } | 
|  | 2302 | if (ep->num != UDC_EP0OUT_IX) | 
|  | 2303 | dev->data_ep_queued = 0; | 
|  | 2304 | } | 
|  | 2305 |  | 
|  | 2306 | } else { | 
|  | 2307 | /* | 
|  | 2308 | * RX DMA must be reenabled for each desc in PPBDU mode | 
|  | 2309 | * and must be enabled for PPBNDU mode in case of BNA | 
|  | 2310 | */ | 
|  | 2311 | udc_set_rde(dev); | 
|  | 2312 | } | 
|  | 2313 |  | 
|  | 2314 | } else if (ep->cancel_transfer) { | 
|  | 2315 | ret_val = IRQ_HANDLED; | 
|  | 2316 | ep->cancel_transfer = 0; | 
|  | 2317 | } | 
|  | 2318 |  | 
|  | 2319 | /* check pending CNAKS */ | 
|  | 2320 | if (cnak_pending) { | 
|  | 2321 | /* CNAK processing only when rxfifo is empty */ | 
|  | 2322 | if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | 
|  | 2323 | udc_process_cnak_queue(dev); | 
|  | 2324 | } | 
|  | 2325 | } | 
|  | 2326 |  | 
|  | 2327 | /* clear OUT bits in ep status */ | 
|  | 2328 | writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts); | 
|  | 2329 | finished: | 
|  | 2330 | return ret_val; | 
|  | 2331 | } | 
|  | 2332 |  | 
|  | 2333 | /* Interrupt handler for data IN traffic */ | 
|  | 2334 | static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix) | 
|  | 2335 | { | 
|  | 2336 | irqreturn_t ret_val = IRQ_NONE; | 
|  | 2337 | u32 tmp; | 
|  | 2338 | u32 epsts; | 
|  | 2339 | struct udc_ep *ep; | 
|  | 2340 | struct udc_request *req; | 
|  | 2341 | struct udc_data_dma *td; | 
|  | 2342 | unsigned dma_done; | 
|  | 2343 | unsigned len; | 
|  | 2344 |  | 
|  | 2345 | ep = &dev->ep[ep_ix]; | 
|  | 2346 |  | 
|  | 2347 | epsts = readl(&ep->regs->sts); | 
|  | 2348 | if (use_dma) { | 
|  | 2349 | /* BNA ? */ | 
|  | 2350 | if (epsts & AMD_BIT(UDC_EPSTS_BNA)) { | 
|  | 2351 | dev_err(&dev->pdev->dev, | 
|  | 2352 | "BNA ep%din occured - DESPTR = %08lx \n", | 
|  | 2353 | ep->num, | 
|  | 2354 | (unsigned long) readl(&ep->regs->desptr)); | 
|  | 2355 |  | 
|  | 2356 | /* clear BNA */ | 
|  | 2357 | writel(epsts, &ep->regs->sts); | 
|  | 2358 | ret_val = IRQ_HANDLED; | 
|  | 2359 | goto finished; | 
|  | 2360 | } | 
|  | 2361 | } | 
|  | 2362 | /* HE event ? */ | 
|  | 2363 | if (epsts & AMD_BIT(UDC_EPSTS_HE)) { | 
|  | 2364 | dev_err(&dev->pdev->dev, | 
|  | 2365 | "HE ep%dn occured - DESPTR = %08lx \n", | 
|  | 2366 | ep->num, (unsigned long) readl(&ep->regs->desptr)); | 
|  | 2367 |  | 
|  | 2368 | /* clear HE */ | 
|  | 2369 | writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); | 
|  | 2370 | ret_val = IRQ_HANDLED; | 
|  | 2371 | goto finished; | 
|  | 2372 | } | 
|  | 2373 |  | 
|  | 2374 | /* DMA completion */ | 
|  | 2375 | if (epsts & AMD_BIT(UDC_EPSTS_TDC)) { | 
|  | 2376 | VDBG(dev, "TDC set- completion\n"); | 
|  | 2377 | ret_val = IRQ_HANDLED; | 
|  | 2378 | if (!ep->cancel_transfer && !list_empty(&ep->queue)) { | 
|  | 2379 | req = list_entry(ep->queue.next, | 
|  | 2380 | struct udc_request, queue); | 
|  | 2381 | if (req) { | 
|  | 2382 | /* | 
|  | 2383 | * length bytes transferred | 
|  | 2384 | * check dma done of last desc. in PPBDU mode | 
|  | 2385 | */ | 
|  | 2386 | if (use_dma_ppb_du) { | 
|  | 2387 | td = udc_get_last_dma_desc(req); | 
|  | 2388 | if (td) { | 
|  | 2389 | dma_done = | 
|  | 2390 | AMD_GETBITS(td->status, | 
|  | 2391 | UDC_DMA_IN_STS_BS); | 
|  | 2392 | /* don't care about DMA done status */ | 
|  | 2393 | req->req.actual = | 
|  | 2394 | req->req.length; | 
|  | 2395 | } | 
|  | 2396 | } else { | 
|  | 2397 | /* assume all bytes transferred */ | 
|  | 2398 | req->req.actual = req->req.length; | 
|  | 2399 | } | 
|  | 2400 |  | 
|  | 2401 | if (req->req.actual == req->req.length) { | 
|  | 2402 | /* complete req */ | 
|  | 2403 | complete_req(ep, req, 0); | 
|  | 2404 | req->dma_going = 0; | 
|  | 2405 | /* further request available ? */ | 
|  | 2406 | if (list_empty(&ep->queue)) { | 
|  | 2407 | /* disable interrupt */ | 
|  | 2408 | tmp = readl( | 
|  | 2409 | &dev->regs->ep_irqmsk); | 
|  | 2410 | tmp |= AMD_BIT(ep->num); | 
|  | 2411 | writel(tmp, | 
|  | 2412 | &dev->regs->ep_irqmsk); | 
|  | 2413 | } | 
|  | 2414 |  | 
|  | 2415 | } | 
|  | 2416 | } | 
|  | 2417 | } | 
|  | 2418 | ep->cancel_transfer = 0; | 
|  | 2419 |  | 
|  | 2420 | } | 
|  | 2421 | /* | 
|  | 2422 | * status reg has IN bit set and TDC not set (if TDC was handled, | 
|  | 2423 | * IN must not be handled; UDC defect?) | 
|  | 2424 | */ | 
|  | 2425 | if ((epsts & AMD_BIT(UDC_EPSTS_IN)) | 
|  | 2426 | && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) { | 
|  | 2427 | ret_val = IRQ_HANDLED; | 
|  | 2428 | if (!list_empty(&ep->queue)) { | 
|  | 2429 | /* next request */ | 
|  | 2430 | req = list_entry(ep->queue.next, | 
|  | 2431 | struct udc_request, queue); | 
|  | 2432 | /* FIFO mode */ | 
|  | 2433 | if (!use_dma) { | 
|  | 2434 | /* write fifo */ | 
|  | 2435 | udc_txfifo_write(ep, &req->req); | 
|  | 2436 | len = req->req.length - req->req.actual; | 
|  | 2437 | if (len > ep->ep.maxpacket) | 
|  | 2438 | len = ep->ep.maxpacket; | 
|  | 2439 | req->req.actual += len; | 
|  | 2440 | if (req->req.actual == req->req.length | 
|  | 2441 | || (len != ep->ep.maxpacket)) { | 
|  | 2442 | /* complete req */ | 
|  | 2443 | complete_req(ep, req, 0); | 
|  | 2444 | } | 
|  | 2445 | /* DMA */ | 
|  | 2446 | } else if (req && !req->dma_going) { | 
|  | 2447 | VDBG(dev, "IN DMA : req=%p req->td_data=%p\n", | 
|  | 2448 | req, req->td_data); | 
|  | 2449 | if (req->td_data) { | 
|  | 2450 |  | 
|  | 2451 | req->dma_going = 1; | 
|  | 2452 |  | 
|  | 2453 | /* | 
|  | 2454 | * unset L bit of first desc. | 
|  | 2455 | * for chain | 
|  | 2456 | */ | 
|  | 2457 | if (use_dma_ppb && req->req.length > | 
|  | 2458 | ep->ep.maxpacket) { | 
|  | 2459 | req->td_data->status &= | 
|  | 2460 | AMD_CLEAR_BIT( | 
|  | 2461 | UDC_DMA_IN_STS_L); | 
|  | 2462 | } | 
|  | 2463 |  | 
|  | 2464 | /* write desc pointer */ | 
|  | 2465 | writel(req->td_phys, &ep->regs->desptr); | 
|  | 2466 |  | 
|  | 2467 | /* set HOST READY */ | 
|  | 2468 | req->td_data->status = | 
|  | 2469 | AMD_ADDBITS( | 
|  | 2470 | req->td_data->status, | 
|  | 2471 | UDC_DMA_IN_STS_BS_HOST_READY, | 
|  | 2472 | UDC_DMA_IN_STS_BS); | 
|  | 2473 |  | 
|  | 2474 | /* set poll demand bit */ | 
|  | 2475 | tmp = readl(&ep->regs->ctl); | 
|  | 2476 | tmp |= AMD_BIT(UDC_EPCTL_P); | 
|  | 2477 | writel(tmp, &ep->regs->ctl); | 
|  | 2478 | } | 
|  | 2479 | } | 
|  | 2480 |  | 
|  | 2481 | } | 
|  | 2482 | } | 
|  | 2483 | /* clear status bits */ | 
|  | 2484 | writel(epsts, &ep->regs->sts); | 
|  | 2485 |  | 
|  | 2486 | finished: | 
|  | 2487 | return ret_val; | 
|  | 2488 |  | 
|  | 2489 | } | 
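|  |  |  | 
|  |  | /* | 
|  |  | * Descriptive summary of the IN path above: TDC (transfer done, DMA) | 
|  |  | * completes the current request and masks the ep interrupt when the | 
|  |  | * queue runs empty, while the IN token bit (without TDC) either | 
|  |  | * refills the txfifo in PIO mode or arms the next descriptor and | 
|  |  | * sets the poll demand bit in DMA mode. | 
|  |  | */ | 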
|  | 2490 |  | 
|  | 2491 | /* Interrupt handler for Control OUT traffic */ | 
|  | 2492 | static irqreturn_t udc_control_out_isr(struct udc *dev) | 
|  | 2493 | __releases(dev->lock) | 
|  | 2494 | __acquires(dev->lock) | 
|  | 2495 | { | 
|  | 2496 | irqreturn_t ret_val = IRQ_NONE; | 
|  | 2497 | u32 tmp; | 
|  | 2498 | int setup_supported; | 
|  | 2499 | u32 count; | 
|  | 2500 | int set = 0; | 
|  | 2501 | struct udc_ep	*ep; | 
|  | 2502 | struct udc_ep	*ep_tmp; | 
|  | 2503 |  | 
|  | 2504 | ep = &dev->ep[UDC_EP0OUT_IX]; | 
|  | 2505 |  | 
|  | 2506 | /* clear irq */ | 
|  | 2507 | writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts); | 
|  | 2508 |  | 
|  | 2509 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts); | 
|  | 2510 | /* check BNA and clear if set */ | 
|  | 2511 | if (tmp & AMD_BIT(UDC_EPSTS_BNA)) { | 
|  | 2512 | VDBG(dev, "ep0: BNA set\n"); | 
|  | 2513 | writel(AMD_BIT(UDC_EPSTS_BNA), | 
|  | 2514 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | 
|  | 2515 | ep->bna_occurred = 1; | 
|  | 2516 | ret_val = IRQ_HANDLED; | 
|  | 2517 | goto finished; | 
|  | 2518 | } | 
|  | 2519 |  | 
|  | 2520 | /* type of data: SETUP or DATA 0 bytes */ | 
|  | 2521 | tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT); | 
|  | 2522 | VDBG(dev, "data_typ = %x\n", tmp); | 
|  | 2523 |  | 
|  | 2524 | /* setup data */ | 
|  | 2525 | if (tmp == UDC_EPSTS_OUT_SETUP) { | 
|  | 2526 | ret_val = IRQ_HANDLED; | 
|  | 2527 |  | 
|  | 2528 | ep->dev->stall_ep0in = 0; | 
|  | 2529 | dev->waiting_zlp_ack_ep0in = 0; | 
|  | 2530 |  | 
|  | 2531 | /* set NAK for EP0_IN */ | 
|  | 2532 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2533 | tmp |= AMD_BIT(UDC_EPCTL_SNAK); | 
|  | 2534 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2535 | dev->ep[UDC_EP0IN_IX].naking = 1; | 
|  | 2536 | /* get setup data */ | 
|  | 2537 | if (use_dma) { | 
|  | 2538 |  | 
|  | 2539 | /* clear OUT bits in ep status */ | 
|  | 2540 | writel(UDC_EPSTS_OUT_CLEAR, | 
|  | 2541 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | 
|  | 2542 |  | 
|  | 2543 | setup_data.data[0] = | 
|  | 2544 | dev->ep[UDC_EP0OUT_IX].td_stp->data12; | 
|  | 2545 | setup_data.data[1] = | 
|  | 2546 | dev->ep[UDC_EP0OUT_IX].td_stp->data34; | 
|  | 2547 | /* set HOST READY */ | 
|  | 2548 | dev->ep[UDC_EP0OUT_IX].td_stp->status = | 
|  | 2549 | UDC_DMA_STP_STS_BS_HOST_READY; | 
|  | 2550 | } else { | 
|  | 2551 | /* read fifo */ | 
|  | 2552 | udc_rxfifo_read_dwords(dev, setup_data.data, 2); | 
|  | 2553 | } | 
|  | 2554 |  | 
|  | 2555 | /* determine direction of control data */ | 
|  | 2556 | if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) { | 
|  | 2557 | dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; | 
|  | 2558 | /* enable RDE */ | 
|  | 2559 | udc_ep0_set_rde(dev); | 
|  | 2560 | set = 0; | 
|  | 2561 | } else { | 
|  | 2562 | dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep; | 
|  | 2563 | /* | 
|  | 2564 | * implant BNA dummy descriptor to allow RXFIFO opening | 
|  | 2565 | * by RDE | 
|  | 2566 | */ | 
|  | 2567 | if (ep->bna_dummy_req) { | 
|  | 2568 | /* write desc pointer */ | 
|  | 2569 | writel(ep->bna_dummy_req->td_phys, | 
|  | 2570 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | 
|  | 2571 | ep->bna_occurred = 0; | 
|  | 2572 | } | 
|  | 2573 |  | 
|  | 2574 | set = 1; | 
|  | 2575 | dev->ep[UDC_EP0OUT_IX].naking = 1; | 
|  | 2576 | /* | 
|  | 2577 | * set up timer for enabling RDE (so as not to enable | 
|  | 2578 | * RXFIFO DMA for data too early) | 
|  | 2579 | */ | 
|  | 2580 | set_rde = 1; | 
|  | 2581 | if (!timer_pending(&udc_timer)) { | 
|  | 2582 | udc_timer.expires = jiffies + | 
|  | 2583 | HZ/UDC_RDE_TIMER_DIV; | 
|  | 2584 | if (!stop_timer) { | 
|  | 2585 | add_timer(&udc_timer); | 
|  | 2586 | } | 
|  | 2587 | } | 
|  | 2588 | } | 
|  | 2589 |  | 
|  | 2590 | /* | 
|  | 2591 | * mass storage reset must be processed here because | 
|  | 2592 | * next packet may be a CLEAR_FEATURE HALT which would not | 
|  | 2593 | * clear the stall bit when no STALL handshake was received | 
|  | 2594 | * before (autostall can cause this) | 
|  | 2595 | */ | 
|  | 2596 | if (setup_data.data[0] == UDC_MSCRES_DWORD0 | 
|  | 2597 | && setup_data.data[1] == UDC_MSCRES_DWORD1) { | 
|  | 2598 | DBG(dev, "MSC Reset\n"); | 
|  | 2599 | /* | 
|  | 2600 | * clear stall bits | 
|  | 2601 | * only one IN and one OUT endpoint are handled | 
|  | 2602 | */ | 
|  | 2603 | ep_tmp = &udc->ep[UDC_EPIN_IX]; | 
|  | 2604 | udc_set_halt(&ep_tmp->ep, 0); | 
|  | 2605 | ep_tmp = &udc->ep[UDC_EPOUT_IX]; | 
|  | 2606 | udc_set_halt(&ep_tmp->ep, 0); | 
|  | 2607 | } | 
|  | 2608 |  | 
|  | 2609 | /* call gadget with setup data received */ | 
|  | 2610 | spin_unlock(&dev->lock); | 
|  | 2611 | setup_supported = dev->driver->setup(&dev->gadget, | 
|  | 2612 | &setup_data.request); | 
|  | 2613 | spin_lock(&dev->lock); | 
|  | 2614 |  | 
|  | 2615 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2616 | /* ep0 in returns data (not zlp) on IN phase */ | 
|  | 2617 | if (setup_supported >= 0 && setup_supported < | 
|  | 2618 | UDC_EP0IN_MAXPACKET) { | 
|  | 2619 | /* clear NAK by writing CNAK in EP0_IN */ | 
|  | 2620 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 2621 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2622 | dev->ep[UDC_EP0IN_IX].naking = 0; | 
|  | 2623 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); | 
|  | 2624 |  | 
|  | 2625 | /* if unsupported request then stall */ | 
|  | 2626 | } else if (setup_supported < 0) { | 
|  | 2627 | tmp |= AMD_BIT(UDC_EPCTL_S); | 
|  | 2628 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2629 | } else | 
|  | 2630 | dev->waiting_zlp_ack_ep0in = 1; | 
|  | 2631 |  | 
|  | 2632 |  | 
|  | 2633 | /* clear NAK by writing CNAK in EP0_OUT */ | 
|  | 2634 | if (!set) { | 
|  | 2635 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | 
|  | 2636 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | 
|  | 2637 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | 
|  | 2638 | dev->ep[UDC_EP0OUT_IX].naking = 0; | 
|  | 2639 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); | 
|  | 2640 | } | 
|  | 2641 |  | 
|  | 2642 | if (!use_dma) { | 
|  | 2643 | /* clear OUT bits in ep status */ | 
|  | 2644 | writel(UDC_EPSTS_OUT_CLEAR, | 
|  | 2645 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | 
|  | 2646 | } | 
|  | 2647 |  | 
|  | 2648 | /* data packet 0 bytes */ | 
|  | 2649 | } else if (tmp == UDC_EPSTS_OUT_DATA) { | 
|  | 2650 | /* clear OUT bits in ep status */ | 
|  | 2651 | writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts); | 
|  | 2652 |  | 
|  | 2653 | /* get setup data: only 0 packet */ | 
|  | 2654 | if (use_dma) { | 
|  | 2655 | /* no req if 0 packet, just reactivate */ | 
|  | 2656 | if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) { | 
|  | 2657 | VDBG(dev, "ZLP\n"); | 
|  | 2658 |  | 
|  | 2659 | /* set HOST READY */ | 
|  | 2660 | dev->ep[UDC_EP0OUT_IX].td->status = | 
|  | 2661 | AMD_ADDBITS( | 
|  | 2662 | dev->ep[UDC_EP0OUT_IX].td->status, | 
|  | 2663 | UDC_DMA_OUT_STS_BS_HOST_READY, | 
|  | 2664 | UDC_DMA_OUT_STS_BS); | 
|  | 2665 | /* enable RDE */ | 
|  | 2666 | udc_ep0_set_rde(dev); | 
|  | 2667 | ret_val = IRQ_HANDLED; | 
|  | 2668 |  | 
|  | 2669 | } else { | 
|  | 2670 | /* control write */ | 
|  | 2671 | ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX); | 
|  | 2672 | /* re-program desc. pointer for possible ZLPs */ | 
|  | 2673 | writel(dev->ep[UDC_EP0OUT_IX].td_phys, | 
|  | 2674 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | 
|  | 2675 | /* enable RDE */ | 
|  | 2676 | udc_ep0_set_rde(dev); | 
|  | 2677 | } | 
|  | 2678 | } else { | 
|  | 2679 |  | 
|  | 2680 | /* number of received bytes */ | 
|  | 2681 | count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts); | 
|  | 2682 | count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE); | 
|  | 2683 | /* out data for fifo mode not working, so force 0 packet handling */ | 
|  | 2684 | count = 0; | 
|  | 2685 |  | 
|  | 2686 | /* 0 packet or real data ? */ | 
|  | 2687 | if (count != 0) { | 
|  | 2688 | ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX); | 
|  | 2689 | } else { | 
|  | 2690 | /* dummy read confirm */ | 
|  | 2691 | readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm); | 
|  | 2692 | ret_val = IRQ_HANDLED; | 
|  | 2693 | } | 
|  | 2694 | } | 
|  | 2695 | } | 
|  | 2696 |  | 
|  | 2697 | /* check pending CNAKS */ | 
|  | 2698 | if (cnak_pending) { | 
|  | 2699 | /* CNAK processing only when rxfifo is empty */ | 
|  | 2700 | if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | 
|  | 2701 | udc_process_cnak_queue(dev); | 
|  | 2702 | } | 
|  | 2703 | } | 
|  | 2704 |  | 
|  | 2705 | finished: | 
|  | 2706 | return ret_val; | 
|  | 2707 | } | 
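|  |  |  | 
|  |  | /* | 
|  |  | * Descriptive summary of the control OUT path above: a SETUP packet | 
|  |  | * NAKs ep0-in, is handed to the gadget driver's setup() callback, | 
|  |  | * and the affected direction is then CNAKed (or ep0-in stalled on | 
|  |  | * error); a zero-length DATA packet merely re-arms the descriptor | 
|  |  | * in DMA mode or confirms the fifo read in PIO mode. | 
|  |  | */ | 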
|  | 2708 |  | 
|  | 2709 | /* Interrupt handler for Control IN traffic */ | 
|  | 2710 | static irqreturn_t udc_control_in_isr(struct udc *dev) | 
|  | 2711 | { | 
|  | 2712 | irqreturn_t ret_val = IRQ_NONE; | 
|  | 2713 | u32 tmp; | 
|  | 2714 | struct udc_ep *ep; | 
|  | 2715 | struct udc_request *req; | 
|  | 2716 | unsigned len; | 
|  | 2717 |  | 
|  | 2718 | ep = &dev->ep[UDC_EP0IN_IX]; | 
|  | 2719 |  | 
|  | 2720 | /* clear irq */ | 
|  | 2721 | writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts); | 
|  | 2722 |  | 
|  | 2723 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts); | 
|  | 2724 | /* DMA completion */ | 
|  | 2725 | if (tmp & AMD_BIT(UDC_EPSTS_TDC)) { | 
|  | 2726 | VDBG(dev, "isr: TDC clear \n"); | 
|  | 2727 | ret_val = IRQ_HANDLED; | 
|  | 2728 |  | 
|  | 2729 | /* clear TDC bit */ | 
|  | 2730 | writel(AMD_BIT(UDC_EPSTS_TDC), | 
|  | 2731 | &dev->ep[UDC_EP0IN_IX].regs->sts); | 
|  | 2732 |  | 
|  | 2733 | /* status reg has IN bit set ? */ | 
|  | 2734 | } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) { | 
|  | 2735 | ret_val = IRQ_HANDLED; | 
|  | 2736 |  | 
|  | 2737 | if (ep->dma) { | 
|  | 2738 | /* clear IN bit */ | 
|  | 2739 | writel(AMD_BIT(UDC_EPSTS_IN), | 
|  | 2740 | &dev->ep[UDC_EP0IN_IX].regs->sts); | 
|  | 2741 | } | 
|  | 2742 | if (dev->stall_ep0in) { | 
|  | 2743 | DBG(dev, "stall ep0in\n"); | 
|  | 2744 | /* halt ep0in */ | 
|  | 2745 | tmp = readl(&ep->regs->ctl); | 
|  | 2746 | tmp |= AMD_BIT(UDC_EPCTL_S); | 
|  | 2747 | writel(tmp, &ep->regs->ctl); | 
|  | 2748 | } else { | 
|  | 2749 | if (!list_empty(&ep->queue)) { | 
|  | 2750 | /* next request */ | 
|  | 2751 | req = list_entry(ep->queue.next, | 
|  | 2752 | struct udc_request, queue); | 
|  | 2753 |  | 
|  | 2754 | if (ep->dma) { | 
|  | 2755 | /* write desc pointer */ | 
|  | 2756 | writel(req->td_phys, &ep->regs->desptr); | 
|  | 2757 | /* set HOST READY */ | 
|  | 2758 | req->td_data->status = | 
|  | 2759 | AMD_ADDBITS( | 
|  | 2760 | req->td_data->status, | 
|  | 2761 | UDC_DMA_STP_STS_BS_HOST_READY, | 
|  | 2762 | UDC_DMA_STP_STS_BS); | 
|  | 2763 |  | 
|  | 2764 | /* set poll demand bit */ | 
|  | 2765 | tmp = | 
|  | 2766 | readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2767 | tmp |= AMD_BIT(UDC_EPCTL_P); | 
|  | 2768 | writel(tmp, | 
|  | 2769 | &dev->ep[UDC_EP0IN_IX].regs->ctl); | 
|  | 2770 |  | 
|  | 2771 | /* all bytes will be transferred */ | 
|  | 2772 | req->req.actual = req->req.length; | 
|  | 2773 |  | 
|  | 2774 | /* complete req */ | 
|  | 2775 | complete_req(ep, req, 0); | 
|  | 2776 |  | 
|  | 2777 | } else { | 
|  | 2778 | /* write fifo */ | 
|  | 2779 | udc_txfifo_write(ep, &req->req); | 
|  | 2780 |  | 
|  | 2781 | /* length bytes transferred */ | 
|  | 2782 | len = req->req.length - req->req.actual; | 
|  | 2783 | if (len > ep->ep.maxpacket) | 
|  | 2784 | len = ep->ep.maxpacket; | 
|  | 2785 |  | 
|  | 2786 | req->req.actual += len; | 
|  | 2787 | if (req->req.actual == req->req.length | 
|  | 2788 | || (len != ep->ep.maxpacket)) { | 
|  | 2789 | /* complete req */ | 
|  | 2790 | complete_req(ep, req, 0); | 
|  | 2791 | } | 
|  | 2792 | } | 
|  | 2793 |  | 
|  | 2794 | } | 
|  | 2795 | } | 
|  | 2796 | ep->halted = 0; | 
|  | 2797 | dev->stall_ep0in = 0; | 
|  | 2798 | if (!ep->dma) { | 
|  | 2799 | /* clear IN bit */ | 
|  | 2800 | writel(AMD_BIT(UDC_EPSTS_IN), | 
|  | 2801 | &dev->ep[UDC_EP0IN_IX].regs->sts); | 
|  | 2802 | } | 
|  | 2803 | } | 
|  | 2804 |  | 
|  | 2805 | return ret_val; | 
|  | 2806 | } | 
|  | 2807 |  | 
|  | 2808 |  | 
|  | 2809 | /* Interrupt handler for global device events */ | 
|  | 2810 | static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq) | 
|  | 2811 | __releases(dev->lock) | 
|  | 2812 | __acquires(dev->lock) | 
|  | 2813 | { | 
|  | 2814 | irqreturn_t ret_val = IRQ_NONE; | 
|  | 2815 | u32 tmp; | 
|  | 2816 | u32 cfg; | 
|  | 2817 | struct udc_ep *ep; | 
|  | 2818 | u16 i; | 
|  | 2819 | u8 udc_csr_epix; | 
|  | 2820 |  | 
|  | 2821 | /* SET_CONFIG irq ? */ | 
|  | 2822 | if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) { | 
|  | 2823 | ret_val = IRQ_HANDLED; | 
|  | 2824 |  | 
|  | 2825 | /* read config value */ | 
|  | 2826 | tmp = readl(&dev->regs->sts); | 
|  | 2827 | cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG); | 
|  | 2828 | DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg); | 
|  | 2829 | dev->cur_config = cfg; | 
|  | 2830 | dev->set_cfg_not_acked = 1; | 
|  | 2831 |  | 
|  | 2832 | /* make usb request for gadget driver */ | 
|  | 2833 | memset(&setup_data, 0 , sizeof(union udc_setup_data)); | 
|  | 2834 | setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION; | 
| Al Viro | fd05e72 | 2008-04-28 07:00:16 +0100 | [diff] [blame] | 2835 | setup_data.request.wValue = cpu_to_le16(dev->cur_config); | 
| Thomas Dahlmann | 55d402d | 2007-07-16 21:40:54 -0700 | [diff] [blame] | 2836 |  | 
|  | 2837 | /* program the NE registers */ | 
|  | 2838 | for (i = 0; i < UDC_EP_NUM; i++) { | 
|  | 2839 | ep = &dev->ep[i]; | 
|  | 2840 | if (ep->in) { | 
|  | 2841 |  | 
|  | 2842 | /* ep ix in UDC CSR register space */ | 
|  | 2843 | udc_csr_epix = ep->num; | 
|  | 2844 |  | 
|  | 2845 |  | 
|  | 2846 | /* OUT ep */ | 
|  | 2847 | } else { | 
|  | 2848 | /* ep ix in UDC CSR register space */ | 
|  | 2849 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | 
|  | 2850 | } | 
|  | 2851 |  | 
|  | 2852 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | 
|  | 2853 | /* ep cfg */ | 
|  | 2854 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, | 
|  | 2855 | UDC_CSR_NE_CFG); | 
|  | 2856 | /* write reg */ | 
|  | 2857 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | 
|  | 2858 |  | 
|  | 2859 | /* clear stall bits */ | 
|  | 2860 | ep->halted = 0; | 
|  | 2861 | tmp = readl(&ep->regs->ctl); | 
|  | 2862 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | 
|  | 2863 | writel(tmp, &ep->regs->ctl); | 
|  | 2864 | } | 
|  | 2865 | /* call gadget zero with setup data received */ | 
|  | 2866 | spin_unlock(&dev->lock); | 
|  | 2867 | tmp = dev->driver->setup(&dev->gadget, &setup_data.request); | 
|  | 2868 | spin_lock(&dev->lock); | 
|  | 2869 |  | 
|  | 2870 | } /* SET_INTERFACE ? */ | 
|  | 2871 | if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) { | 
|  | 2872 | ret_val = IRQ_HANDLED; | 
|  | 2873 |  | 
|  | 2874 | dev->set_cfg_not_acked = 1; | 
|  | 2875 | /* read interface and alt setting values */ | 
|  | 2876 | tmp = readl(&dev->regs->sts); | 
|  | 2877 | dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT); | 
|  | 2878 | dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF); | 
|  | 2879 |  | 
|  | 2880 | /* make usb request for gadget driver */ | 
|  | 2881 | memset(&setup_data, 0 , sizeof(union udc_setup_data)); | 
|  | 2882 | setup_data.request.bRequest = USB_REQ_SET_INTERFACE; | 
|  | 2883 | setup_data.request.bRequestType = USB_RECIP_INTERFACE; | 
| Al Viro | fd05e72 | 2008-04-28 07:00:16 +0100 | [diff] [blame] | 2884 | setup_data.request.wValue = cpu_to_le16(dev->cur_alt); | 
|  | 2885 | setup_data.request.wIndex = cpu_to_le16(dev->cur_intf); | 
| Thomas Dahlmann | 55d402d | 2007-07-16 21:40:54 -0700 | [diff] [blame] | 2886 |  | 
|  | 2887 | DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n", | 
|  | 2888 | dev->cur_alt, dev->cur_intf); | 
|  | 2889 |  | 
|  | 2890 | /* program the NE registers */ | 
|  | 2891 | for (i = 0; i < UDC_EP_NUM; i++) { | 
|  | 2892 | ep = &dev->ep[i]; | 
|  | 2893 | if (ep->in) { | 
|  | 2894 |  | 
|  | 2895 | /* ep ix in UDC CSR register space */ | 
|  | 2896 | udc_csr_epix = ep->num; | 
|  | 2897 |  | 
|  | 2898 |  | 
|  | 2899 | /* OUT ep */ | 
|  | 2900 | } else { | 
|  | 2901 | /* ep ix in UDC CSR register space */ | 
|  | 2902 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | 
|  | 2903 | } | 
|  | 2904 |  | 
|  | 2905 | /* UDC CSR reg */ | 
|  | 2906 | /* set ep values */ | 
|  | 2907 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | 
|  | 2908 | /* ep interface */ | 
|  | 2909 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, | 
|  | 2910 | UDC_CSR_NE_INTF); | 
|  | 2912 | /* ep alt */ | 
|  | 2913 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, | 
|  | 2914 | UDC_CSR_NE_ALT); | 
|  | 2915 | /* write reg */ | 
|  | 2916 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | 
|  | 2917 |  | 
|  | 2918 | /* clear stall bits */ | 
|  | 2919 | ep->halted = 0; | 
|  | 2920 | tmp = readl(&ep->regs->ctl); | 
|  | 2921 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | 
|  | 2922 | writel(tmp, &ep->regs->ctl); | 
|  | 2923 | } | 
|  | 2924 |  | 
|  | 2925 | /* call gadget zero with setup data received */ | 
|  | 2926 | spin_unlock(&dev->lock); | 
|  | 2927 | tmp = dev->driver->setup(&dev->gadget, &setup_data.request); | 
|  | 2928 | spin_lock(&dev->lock); | 
|  | 2929 |  | 
|  | 2930 | } /* USB reset */ | 
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask interrupts that are not needed */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
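		/* DMARST is pulsed: set the bit, then restore original cfg */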
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

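		/*
		 * udc_setup_endpoints() also refreshes gadget.speed from
		 * the enumerated value, which the speed checks below
		 * rely on.
		 */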
		/* link up all endpoints */
		udc_setup_endpoints(dev);
		if (dev->gadget.speed == USB_SPEED_HIGH)
			dev_info(&dev->pdev->dev, "Connect: speed = high\n");
		else if (dev->gadget.speed == USB_SPEED_FULL)
			dev_info(&dev->pdev->dev, "Connect: speed = full\n");

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* session no longer valid means the host disconnected */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}
	}

	return ret_val;
}

/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/* data endpoints: iterate the remaining irq bits */
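		/* IN eps use the low irq bits, OUT eps the high bits */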
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}
	}

	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}

	spin_unlock(&dev->lock);
	return ret_val;
}

/* Tears down device */
static void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);
	kfree(dev);
}

/* Cleanup on device remove */
static void udc_remove(struct udc *dev)
{
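	/*
	 * Shutdown protocol: stop_timer/stop_pollstall_timer tell a
	 * pending timer callback to signal on_exit/on_pollstall_exit
	 * instead of re-arming; .data != 0 marks a timer as initialized.
	 */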
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);
	udc = NULL;
}

/* Reset all pci context */
static void udc_pci_remove(struct pci_dev *pdev)
{
	struct udc		*dev;

	dev = pci_get_drvdata(pdev);

	/* gadget driver must not be registered */
	BUG_ON(dev->driver != NULL);

	/* dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA descriptors for ep0out */
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td_stp,
			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td,
			dev->ep[UDC_EP0OUT_IX].td_phys);

		pci_pool_destroy(dev->stp_requests);
	}

	/* reset controller */
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->mem_region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->active)
		pci_disable_device(pdev);

	device_unregister(&dev->gadget.dev);
	pci_set_drvdata(pdev, NULL);

	udc_remove(dev);
}

/* create dma pools on init */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma	*td_stp;
	struct udc_data_dma	*td_data;
	int retval;

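	/*
	 * The DMA mode flags are mutually exclusive: packet-per-buffer
	 * (ppb, optionally with descriptor update) versus buffer-fill
	 * mode. The module parameters are reconciled here.
	 */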
	/* make DMA mode settings consistent */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		retval = -ENOMEM;
		goto finished;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto finished;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
		&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

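	/*
	 * ep0-out apparently also needs a (dummy) data descriptor even
	 * though it carries no data packets; it comes from the same pool.
	 */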
	/* data descriptor: 0 data packets */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
		&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

finished:
	return retval;
}

/* Called by pci bus driver to init pci context */
static int udc_pci_probe(
	struct pci_dev *pdev,
	const struct pci_device_id *id
)
{
	struct udc		*dev;
	unsigned long		resource;
	unsigned long		len;
	int			retval = 0;

	/* one udc only */
	if (udc) {
		dev_dbg(&pdev->dev, "already probed\n");
		return -EBUSY;
	}

	/* init */
	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto finished;
	}

	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}
	dev->active = 1;

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	if (!request_mem_region(resource, len, name)) {
		dev_dbg(&pdev->dev, "pci device used already\n");
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->mem_region = 1;

	dev->virt_addr = ioremap_nocache(resource, len);
	if (dev->virt_addr == NULL) {
		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
		kfree(dev);
		dev = NULL;
		retval = -EFAULT;
		goto finished;
	}

	if (!pdev->irq) {
		/* use pdev->dev here: dev->pdev is not set up yet */
		dev_err(&pdev->dev, "irq not set\n");
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}

	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->irq_registered = 1;

	pci_set_drvdata(pdev, dev);

	/* chip revision for HS AMD5536 */
	dev->chiprev = pdev->revision;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto finished;
	}

	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;

	/* general probing */
	if (udc_probe(dev) == 0)
		return 0;

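	/*
	 * On early failures dev has already been freed and set to NULL,
	 * so udc_pci_remove() only runs once pci drvdata is valid.
	 */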
finished:
	if (dev)
		udc_pci_remove(pdev);
	return retval;
}

/* general probe */
static int udc_probe(struct udc *dev)
{
	char		tmp[128];
	u32		reg;
	int		retval;

	/* mark timer as not initialized */
	udc_timer.data = 0;
	udc_pollstall_timer.data = 0;

	/* device struct setup */
	spin_lock_init(&dev->lock);
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = name;
	dev->gadget.is_dualspeed = 1;

	/* udc csr registers base */
	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
	/* dev registers base */
	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
	/* ep registers base */
	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
	/* fifo's base */
	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(&dev->pdev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
	dev_info(&dev->pdev->dev,
		"irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
		tmp, dev->phys_addr, dev->chiprev,
		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
	if (dev->chiprev == UDC_HSA0_REV) {
		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
		retval = -ENODEV;
		goto finished;
	}
	dev_info(&dev->pdev->dev,
		"driver version: %s (for Geode5536 B1)\n", tmp);
	udc = dev;

	retval = device_register(&dev->gadget.dev);
	if (retval)
		goto finished;

	/* timer init */
	init_timer(&udc_timer);
	udc_timer.function = udc_timer_function;
	udc_timer.data = 1;
	/* timer pollstall init */
	init_timer(&udc_pollstall_timer);
	udc_pollstall_timer.function = udc_pollstall_timer_function;
	udc_pollstall_timer.data = 1;
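	/* .data = 1 marks the timers as initialized for udc_remove() */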

	/* set SD (soft disconnect): stay disconnected until gadget binds */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

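	/* pulse RES: set then clear it to drive resume signaling */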
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* PCI device parameters */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
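		/* class 0x0c03fe: serial bus, USB, prog-if 0xfe (USB device) */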
		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask =	0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);

/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name =		(char *) name,
	.id_table =	pci_id,
	.probe =	udc_pci_probe,
	.remove =	udc_pci_remove,
};

/* Inits driver */
static int __init init(void)
{
	return pci_register_driver(&udc_pci_driver);
}
module_init(init);

/* Cleans driver */
static void __exit cleanup(void)
{
	pci_unregister_driver(&udc_pci_driver);
}
module_exit(cleanup);

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");