/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

/* NOTE:  DMS+SMS is system-specific. We should get this information
 * from the platform code somehow.
 */
#define DWC_DEFAULT_CTLLO	(DWC_CTLL_DST_MSIZE(0)		\
		| DWC_CTLL_SRC_MSIZE(0)		\
		| DWC_CTLL_DMS(0)		\
		| DWC_CTLL_SMS(1)		\
		| DWC_CTLL_LLP_D_EN		\
		| DWC_CTLL_LLP_S_EN)

/*
 * This is configuration-dependent and usually a funny size like 4095.
 * Let's round it down to the nearest power of two.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 8192 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	2048U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool.  These
 * descriptors -- and associated data -- are cacheable.  We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);
	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&dwc->queue))
		dwc_dostart(dwc, dwc_first_queued(dwc));
	list_splice_init(&dwc->active_list, &list);
	list_splice_init(&dwc->queue, &dwc->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		dwc_dostart(dwc, dwc_first_queued(dwc));
		list_splice_init(&dwc->queue, &dwc->active_list);
	}
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_splice_init(&dwc->queue, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;
		if (callback) {
			spin_unlock(&dwc->lock);
			callback(callback_param);
			spin_lock(&dwc->lock);
		}
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		dwc_dostart(dwc, desc);
		list_add_tail(&desc->desc_node, &dwc->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
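
/*
 * Illustrative usage sketch only (not part of this driver; the names
 * "dst_dma", "src_dma", "my_memcpy_done" and "my_ctx" are made up): a
 * memcpy client normally reaches the routine above through the generic
 * dmaengine hooks rather than calling it directly, roughly like this:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						   len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_memcpy_done;
 *		tx->callback_param = my_ctx;
 *		cookie = tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */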

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
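
/*
 * Illustrative usage sketch only (an assumption, not part of this
 * driver; "my_dw_dma_slave", "sgl" and "sg_len" are made up): a slave
 * peripheral driver is expected to point chan->private at its
 * struct dw_dma_slave (dma_dev, tx_reg/rx_reg, reg_width, cfg_hi/cfg_lo)
 * when it obtains the channel, then prepare transfers through the
 * generic hook, roughly:
 *
 *	chan->private = &my_dw_dma_slave;
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						DMA_TO_DEVICE,
 *						DMA_PREP_INTERRUPT);
 */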

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_dma_slave	*dws;
	int			i;
	u32			cfghi;
	u32			cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo;
	}
	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

| Hans-Christian Egtvedt | d9de451 | 2009-04-01 15:47:02 +0200 | [diff] [blame] | 977 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 
|  | 978 |  | 
|  | 979 | /** | 
|  | 980 | * dw_dma_cyclic_start - start the cyclic DMA transfer | 
|  | 981 | * @chan: the DMA channel to start | 
|  | 982 | * | 
|  | 983 | * Must be called with soft interrupts disabled. Returns zero on success or | 
|  | 984 | * -errno on failure. | 
|  | 985 | */ | 
|  | 986 | int dw_dma_cyclic_start(struct dma_chan *chan) | 
|  | 987 | { | 
|  | 988 | struct dw_dma_chan	*dwc = to_dw_dma_chan(chan); | 
|  | 989 | struct dw_dma		*dw = to_dw_dma(dwc->chan.device); | 
|  | 990 |  | 
|  | 991 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | 
|  | 992 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | 
|  | 993 | return -ENODEV; | 
|  | 994 | } | 
|  | 995 |  | 
|  | 996 | spin_lock(&dwc->lock); | 
|  | 997 |  | 
|  | 998 | /* assert channel is idle */ | 
|  | 999 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 
|  | 1000 | dev_err(chan2dev(&dwc->chan), | 
|  | 1001 | "BUG: Attempted to start non-idle channel\n"); | 
|  | 1002 | dev_err(chan2dev(&dwc->chan), | 
|  | 1003 | "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 
|  | 1004 | channel_readl(dwc, SAR), | 
|  | 1005 | channel_readl(dwc, DAR), | 
|  | 1006 | channel_readl(dwc, LLP), | 
|  | 1007 | channel_readl(dwc, CTL_HI), | 
|  | 1008 | channel_readl(dwc, CTL_LO)); | 
|  | 1009 | spin_unlock(&dwc->lock); | 
|  | 1010 | return -EBUSY; | 
|  | 1011 | } | 
|  | 1012 |  | 
|  | 1013 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 
|  | 1014 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 
|  | 1015 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 
|  | 1016 |  | 
|  | 1017 | /* setup DMAC channel registers */ | 
|  | 1018 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); | 
|  | 1019 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | 
|  | 1020 | channel_writel(dwc, CTL_HI, 0); | 
|  | 1021 |  | 
|  | 1022 | channel_set_bit(dw, CH_EN, dwc->mask); | 
|  | 1023 |  | 
|  | 1024 | spin_unlock(&dwc->lock); | 
|  | 1025 |  | 
|  | 1026 | return 0; | 
|  | 1027 | } | 
|  | 1028 | EXPORT_SYMBOL(dw_dma_cyclic_start); | 
|  | 1029 |  | 
|  | 1030 | /** | 
|  | 1031 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | 
|  | 1032 | * @chan: the DMA channel to stop | 
|  | 1033 | * | 
|  | 1034 | * Must be called with soft interrupts disabled. | 
|  | 1035 | */ | 
|  | 1036 | void dw_dma_cyclic_stop(struct dma_chan *chan) | 
|  | 1037 | { | 
|  | 1038 | struct dw_dma_chan	*dwc = to_dw_dma_chan(chan); | 
|  | 1039 | struct dw_dma		*dw = to_dw_dma(dwc->chan.device); | 
|  | 1040 |  | 
|  | 1041 | spin_lock(&dwc->lock); | 
|  | 1042 |  | 
|  | 1043 | channel_clear_bit(dw, CH_EN, dwc->mask); | 
|  | 1044 | while (dma_readl(dw, CH_EN) & dwc->mask) | 
|  | 1045 | cpu_relax(); | 
|  | 1046 |  | 
|  | 1047 | spin_unlock(&dwc->lock); | 
|  | 1048 | } | 
|  | 1049 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | 
|  | 1050 |  | 
|  | 1051 | /** | 
|  | 1052 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer | 
|  | 1053 | * @chan: the DMA channel to prepare | 
|  | 1054 | * @buf_addr: physical DMA address where the buffer starts | 
|  | 1055 | * @buf_len: total number of bytes for the entire buffer | 
|  | 1056 | * @period_len: number of bytes for each period | 
|  | 1057 | * @direction: transfer direction, to or from device | 
|  | 1058 | * | 
|  | 1059 | * Must be called before trying to start the transfer. Returns a valid struct | 
|  | 1060 | * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. | 
|  | 1061 | */ | 
|  | 1062 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | 
|  | 1063 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | 
|  | 1064 | enum dma_data_direction direction) | 
|  | 1065 | { | 
|  | 1066 | struct dw_dma_chan		*dwc = to_dw_dma_chan(chan); | 
|  | 1067 | struct dw_cyclic_desc		*cdesc; | 
|  | 1068 | struct dw_cyclic_desc		*retval = NULL; | 
|  | 1069 | struct dw_desc			*desc; | 
|  | 1070 | struct dw_desc			*last = NULL; | 
|  | 1071 | struct dw_dma_slave		*dws = chan->private; | 
|  | 1072 | unsigned long			was_cyclic; | 
|  | 1073 | unsigned int			reg_width; | 
|  | 1074 | unsigned int			periods; | 
|  | 1075 | unsigned int			i; | 
|  | 1076 |  | 
|  | 1077 | spin_lock_bh(&dwc->lock); | 
|  | 1078 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 
|  | 1079 | spin_unlock_bh(&dwc->lock); | 
|  | 1080 | dev_dbg(chan2dev(&dwc->chan), | 
|  | 1081 | "queue and/or active list are not empty\n"); | 
|  | 1082 | return ERR_PTR(-EBUSY); | 
|  | 1083 | } | 
|  | 1084 |  | 
|  | 1085 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 
|  | 1086 | spin_unlock_bh(&dwc->lock); | 
|  | 1087 | if (was_cyclic) { | 
|  | 1088 | dev_dbg(chan2dev(&dwc->chan), | 
|  | 1089 | "channel already prepared for cyclic DMA\n"); | 
|  | 1090 | return ERR_PTR(-EBUSY); | 
|  | 1091 | } | 
|  | 1092 |  | 
|  | 1093 | retval = ERR_PTR(-EINVAL); | 
|  | 1094 | reg_width = dws->reg_width; | 
|  | 1095 | periods = buf_len / period_len; | 
|  | 1096 |  | 
|  | 1097 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | 
|  | 1098 | if (period_len > (DWC_MAX_COUNT << reg_width)) | 
|  | 1099 | goto out_err; | 
|  | 1100 | if (unlikely(period_len & ((1 << reg_width) - 1))) | 
|  | 1101 | goto out_err; | 
|  | 1102 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 
|  | 1103 | goto out_err; | 
|  | 1104 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) | 
|  | 1105 | goto out_err; | 
|  | 1106 |  | 
|  | 1107 | retval = ERR_PTR(-ENOMEM); | 
|  | 1108 |  | 
|  | 1109 | if (periods > NR_DESCS_PER_CHANNEL) | 
|  | 1110 | goto out_err; | 
|  | 1111 |  | 
|  | 1112 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | 
|  | 1113 | if (!cdesc) | 
|  | 1114 | goto out_err; | 
|  | 1115 |  | 
|  | 1116 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); | 
|  | 1117 | if (!cdesc->desc) | 
|  | 1118 | goto out_err_alloc; | 
|  | 1119 |  | 
|  | 1120 | for (i = 0; i < periods; i++) { | 
|  | 1121 | desc = dwc_desc_get(dwc); | 
|  | 1122 | if (!desc) | 
|  | 1123 | goto out_err_desc_get; | 
|  | 1124 |  | 
|  | 1125 | switch (direction) { | 
|  | 1126 | case DMA_TO_DEVICE: | 
|  | 1127 | desc->lli.dar = dws->tx_reg; | 
|  | 1128 | desc->lli.sar = buf_addr + (period_len * i); | 
|  | 1129 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 
|  | 1130 | | DWC_CTLL_DST_WIDTH(reg_width) | 
|  | 1131 | | DWC_CTLL_SRC_WIDTH(reg_width) | 
|  | 1132 | | DWC_CTLL_DST_FIX | 
|  | 1133 | | DWC_CTLL_SRC_INC | 
|  | 1134 | | DWC_CTLL_FC_M2P | 
|  | 1135 | | DWC_CTLL_INT_EN); | 
|  | 1136 | break; | 
|  | 1137 | case DMA_FROM_DEVICE: | 
|  | 1138 | desc->lli.dar = buf_addr + (period_len * i); | 
|  | 1139 | desc->lli.sar = dws->rx_reg; | 
|  | 1140 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 
|  | 1141 | | DWC_CTLL_SRC_WIDTH(reg_width) | 
|  | 1142 | | DWC_CTLL_DST_WIDTH(reg_width) | 
|  | 1143 | | DWC_CTLL_DST_INC | 
|  | 1144 | | DWC_CTLL_SRC_FIX | 
|  | 1145 | | DWC_CTLL_FC_P2M | 
|  | 1146 | | DWC_CTLL_INT_EN); | 
|  | 1147 | break; | 
|  | 1148 | default: | 
|  | 1149 | break; | 
|  | 1150 | } | 
|  | 1151 |  | 
|  | 1152 | desc->lli.ctlhi = (period_len >> reg_width); | 
|  | 1153 | cdesc->desc[i] = desc; | 
|  | 1154 |  | 
|  | 1155 | if (last) { | 
|  | 1156 | last->lli.llp = desc->txd.phys; | 
|  | 1157 | dma_sync_single_for_device(chan2parent(chan), | 
|  | 1158 | last->txd.phys, sizeof(last->lli), | 
|  | 1159 | DMA_TO_DEVICE); | 
|  | 1160 | } | 
|  | 1161 |  | 
|  | 1162 | last = desc; | 
|  | 1163 | } | 
|  | 1164 |  | 
|  | 1165 | /* lets make a cyclic list */ | 
|  | 1166 | last->lli.llp = cdesc->desc[0]->txd.phys; | 
|  | 1167 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | 
|  | 1168 | sizeof(last->lli), DMA_TO_DEVICE); | 
|  | 1169 |  | 
|  | 1170 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " | 
|  | 1171 | "period %zu periods %d\n", buf_addr, buf_len, | 
|  | 1172 | period_len, periods); | 
|  | 1173 |  | 
|  | 1174 | cdesc->periods = periods; | 
|  | 1175 | dwc->cdesc = cdesc; | 
|  | 1176 |  | 
|  | 1177 | return cdesc; | 
|  | 1178 |  | 
|  | 1179 | out_err_desc_get: | 
|  | 1180 | while (i--) | 
|  | 1181 | dwc_desc_put(dwc, cdesc->desc[i]); | 
|  | 1182 | out_err_alloc: | 
|  | 1183 | kfree(cdesc); | 
|  | 1184 | out_err: | 
|  | 1185 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 
|  | 1186 | return (struct dw_cyclic_desc *)retval; | 
|  | 1187 | } | 
|  | 1188 | EXPORT_SYMBOL(dw_dma_cyclic_prep); | 
|  | 1189 |  | 
|  | 1190 | /** | 
|  | 1191 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | 
|  | 1192 | * @chan: the DMA channel to free | 
|  | 1193 | */ | 
|  | 1194 | void dw_dma_cyclic_free(struct dma_chan *chan) | 
|  | 1195 | { | 
|  | 1196 | struct dw_dma_chan	*dwc = to_dw_dma_chan(chan); | 
|  | 1197 | struct dw_dma		*dw = to_dw_dma(dwc->chan.device); | 
|  | 1198 | struct dw_cyclic_desc	*cdesc = dwc->cdesc; | 
|  | 1199 | int			i; | 
|  | 1200 |  | 
|  | 1201 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); | 
|  | 1202 |  | 
|  | 1203 | if (!cdesc) | 
|  | 1204 | return; | 
|  | 1205 |  | 
|  | 1206 | spin_lock_bh(&dwc->lock); | 
|  | 1207 |  | 
|  | 1208 | channel_clear_bit(dw, CH_EN, dwc->mask); | 
|  | 1209 | while (dma_readl(dw, CH_EN) & dwc->mask) | 
|  | 1210 | cpu_relax(); | 
|  | 1211 |  | 
|  | 1212 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 
|  | 1213 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 
|  | 1214 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 
|  | 1215 |  | 
|  | 1216 | spin_unlock_bh(&dwc->lock); | 
|  | 1217 |  | 
|  | 1218 | for (i = 0; i < cdesc->periods; i++) | 
|  | 1219 | dwc_desc_put(dwc, cdesc->desc[i]); | 
|  | 1220 |  | 
|  | 1221 | kfree(cdesc->desc); | 
|  | 1222 | kfree(cdesc); | 
|  | 1223 |  | 
|  | 1224 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 
|  | 1225 | } | 
|  | 1226 | EXPORT_SYMBOL(dw_dma_cyclic_free); | 
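|  |  |  | 
|  |  | /* | 
|  |  |  * Rough usage sketch of the cyclic API (illustrative only, not part | 
|  |  |  * of the driver): a client prepares the ring, starts it, and later | 
|  |  |  * stops and frees it.  This assumes the dw_dma_cyclic_start() and | 
|  |  |  * dw_dma_cyclic_stop() helpers exported elsewhere in this driver, | 
|  |  |  * and a DMA-mapped buffer; buf_dma, buf_len and period_len are | 
|  |  |  * placeholders. | 
|  |  |  * | 
|  |  |  *	struct dw_cyclic_desc *cdesc; | 
|  |  |  * | 
|  |  |  *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, | 
|  |  |  *			period_len, DMA_TO_DEVICE); | 
|  |  |  *	if (IS_ERR(cdesc)) | 
|  |  |  *		return PTR_ERR(cdesc); | 
|  |  |  * | 
|  |  |  *	dw_dma_cyclic_start(chan); | 
|  |  |  *	... | 
|  |  |  *	dw_dma_cyclic_stop(chan); | 
|  |  |  *	dw_dma_cyclic_free(chan); | 
|  |  |  */ | 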
|  | 1227 |  | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1228 | /*----------------------------------------------------------------------*/ | 
|  | 1229 |  | 
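|  |  | /* Disable the controller and mask all per-channel interrupt | 
|  |  |  * sources, then wait for the global DMA enable bit to clear. | 
|  |  |  */ | 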
|  | 1230 | static void dw_dma_off(struct dw_dma *dw) | 
|  | 1231 | { | 
|  | 1232 | dma_writel(dw, CFG, 0); | 
|  | 1233 |  | 
|  | 1234 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 
|  | 1235 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 
|  | 1236 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 
|  | 1237 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 
|  | 1238 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 
|  | 1239 |  | 
|  | 1240 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | 
|  | 1241 | cpu_relax(); | 
|  | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | static int __init dw_probe(struct platform_device *pdev) | 
|  | 1245 | { | 
|  | 1246 | struct dw_dma_platform_data *pdata; | 
|  | 1247 | struct resource		*io; | 
|  | 1248 | struct dw_dma		*dw; | 
|  | 1249 | size_t			size; | 
|  | 1250 | int			irq; | 
|  | 1251 | int			err; | 
|  | 1252 | int			i; | 
|  | 1253 |  | 
|  | 1254 | pdata = pdev->dev.platform_data; | 
|  | 1255 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | 
|  | 1256 | return -EINVAL; | 
|  | 1257 |  | 
|  | 1258 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
|  | 1259 | if (!io) | 
|  | 1260 | return -EINVAL; | 
|  | 1261 |  | 
|  | 1262 | irq = platform_get_irq(pdev, 0); | 
|  | 1263 | if (irq < 0) | 
|  | 1264 | return irq; | 
|  | 1265 |  | 
|  | 1266 | size = sizeof(struct dw_dma); | 
|  | 1267 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); | 
|  | 1268 | dw = kzalloc(size, GFP_KERNEL); | 
|  | 1269 | if (!dw) | 
|  | 1270 | return -ENOMEM; | 
|  | 1271 |  | 
|  | 1272 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { | 
|  | 1273 | err = -EBUSY; | 
|  | 1274 | goto err_kfree; | 
|  | 1275 | } | 
|  | 1276 |  | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1277 | dw->regs = ioremap(io->start, DW_REGLEN); | 
|  | 1278 | if (!dw->regs) { | 
|  | 1279 | err = -ENOMEM; | 
|  | 1280 | goto err_release_r; | 
|  | 1281 | } | 
|  | 1282 |  | 
|  | 1283 | dw->clk = clk_get(&pdev->dev, "hclk"); | 
|  | 1284 | if (IS_ERR(dw->clk)) { | 
|  | 1285 | err = PTR_ERR(dw->clk); | 
|  | 1286 | goto err_clk; | 
|  | 1287 | } | 
|  | 1288 | clk_enable(dw->clk); | 
|  | 1289 |  | 
|  | 1290 | /* Force DMA off, just in case */ | 
|  | 1291 | dw_dma_off(dw); | 
|  | 1292 |  | 
|  | 1293 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); | 
|  | 1294 | if (err) | 
|  | 1295 | goto err_irq; | 
|  | 1296 |  | 
|  | 1297 | platform_set_drvdata(pdev, dw); | 
|  | 1298 |  | 
|  | 1299 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 
|  | 1300 |  | 
|  | 1301 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 
|  | 1302 |  | 
|  | 1303 | INIT_LIST_HEAD(&dw->dma.channels); | 
|  | 1304 | for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { | 
|  | 1305 | struct dw_dma_chan	*dwc = &dw->chan[i]; | 
|  | 1306 |  | 
|  | 1307 | dwc->chan.device = &dw->dma; | 
|  | 1308 | dwc->chan.cookie = dwc->completed = 1; | 
|  | 1309 | dwc->chan.chan_id = i; | 
|  | 1310 | list_add_tail(&dwc->chan.device_node, &dw->dma.channels); | 
|  | 1311 |  | 
|  | 1312 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; | 
|  | 1313 | spin_lock_init(&dwc->lock); | 
|  | 1314 | dwc->mask = 1 << i; | 
|  | 1315 |  | 
|  | 1316 | INIT_LIST_HEAD(&dwc->active_list); | 
|  | 1317 | INIT_LIST_HEAD(&dwc->queue); | 
|  | 1318 | INIT_LIST_HEAD(&dwc->free_list); | 
|  | 1319 |  | 
|  | 1320 | channel_clear_bit(dw, CH_EN, dwc->mask); | 
|  | 1321 | } | 
|  | 1322 |  | 
|  | 1323 | /* Clear/disable all interrupts on all channels. */ | 
|  | 1324 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | 
|  | 1325 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); | 
|  | 1326 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); | 
|  | 1327 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | 
|  | 1328 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | 
|  | 1329 |  | 
|  | 1330 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 
|  | 1331 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 
|  | 1332 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 
|  | 1333 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 
|  | 1334 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 
|  | 1335 |  | 
|  | 1336 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 
|  | 1337 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 
|  | 1338 | dw->dma.dev = &pdev->dev; | 
|  | 1339 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 
|  | 1340 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 
|  | 1341 |  | 
|  | 1342 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | 
|  | 1343 |  | 
|  | 1344 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | 
| Linus Walleij | c3635c7 | 2010-03-26 16:44:01 -0700 | [diff] [blame] | 1345 | dw->dma.device_control = dwc_control; | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1346 |  | 
| Linus Walleij | 0793448 | 2010-03-26 16:50:49 -0700 | [diff] [blame] | 1347 | dw->dma.device_tx_status = dwc_tx_status; | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1348 | dw->dma.device_issue_pending = dwc_issue_pending; | 
|  | 1349 |  | 
|  | 1350 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 
|  | 1351 |  | 
|  | 1352 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | 
| Kay Sievers | dfbc901 | 2009-03-24 16:38:22 -0700 | [diff] [blame] | 1353 | dev_name(&pdev->dev), dw->dma.chancnt); | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1354 |  | 
|  | 1355 | dma_async_device_register(&dw->dma); | 
|  | 1356 |  | 
|  | 1357 | return 0; | 
|  | 1358 |  | 
|  | 1359 | err_irq: | 
|  | 1360 | clk_disable(dw->clk); | 
|  | 1361 | clk_put(dw->clk); | 
|  | 1362 | err_clk: | 
|  | 1363 | iounmap(dw->regs); | 
|  | 1364 | dw->regs = NULL; | 
|  | 1365 | err_release_r: | 
|  | 1366 | release_mem_region(io->start, DW_REGLEN); | 
|  | 1367 | err_kfree: | 
|  | 1368 | kfree(dw); | 
|  | 1369 | return err; | 
|  | 1370 | } | 
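|  |  |  | 
|  |  | /* | 
|  |  |  * Illustrative sketch only (not part of the driver): once the engine | 
|  |  |  * is registered above, a generic dmaengine client can request one of | 
|  |  |  * these channels by capability and use the memcpy operation.  The | 
|  |  |  * dest_dma, src_dma and len values are placeholders. | 
|  |  |  * | 
|  |  |  *	dma_cap_mask_t mask; | 
|  |  |  *	struct dma_chan *chan; | 
|  |  |  *	struct dma_async_tx_descriptor *tx; | 
|  |  |  * | 
|  |  |  *	dma_cap_zero(mask); | 
|  |  |  *	dma_cap_set(DMA_MEMCPY, mask); | 
|  |  |  *	chan = dma_request_channel(mask, NULL, NULL); | 
|  |  |  *	if (!chan) | 
|  |  |  *		return -ENODEV; | 
|  |  |  * | 
|  |  |  *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma, | 
|  |  |  *			src_dma, len, DMA_PREP_INTERRUPT); | 
|  |  |  *	if (tx) { | 
|  |  |  *		tx->tx_submit(tx); | 
|  |  |  *		chan->device->device_issue_pending(chan); | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  * Once the transfer has completed, the channel is handed back with | 
|  |  |  * dma_release_channel(chan). | 
|  |  |  */ | 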
|  | 1371 |  | 
|  | 1372 | static int __exit dw_remove(struct platform_device *pdev) | 
|  | 1373 | { | 
|  | 1374 | struct dw_dma		*dw = platform_get_drvdata(pdev); | 
|  | 1375 | struct dw_dma_chan	*dwc, *_dwc; | 
|  | 1376 | struct resource		*io; | 
|  | 1377 |  | 
|  | 1378 | dw_dma_off(dw); | 
|  | 1379 | dma_async_device_unregister(&dw->dma); | 
|  | 1380 |  | 
|  | 1381 | free_irq(platform_get_irq(pdev, 0), dw); | 
|  | 1382 | tasklet_kill(&dw->tasklet); | 
|  | 1383 |  | 
|  | 1384 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | 
|  | 1385 | chan.device_node) { | 
|  | 1386 | list_del(&dwc->chan.device_node); | 
|  | 1387 | channel_clear_bit(dw, CH_EN, dwc->mask); | 
|  | 1388 | } | 
|  | 1389 |  | 
|  | 1390 | clk_disable(dw->clk); | 
|  | 1391 | clk_put(dw->clk); | 
|  | 1392 |  | 
|  | 1393 | iounmap(dw->regs); | 
|  | 1394 | dw->regs = NULL; | 
|  | 1395 |  | 
|  | 1396 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
|  | 1397 | release_mem_region(io->start, DW_REGLEN); | 
|  | 1398 |  | 
|  | 1399 | kfree(dw); | 
|  | 1400 |  | 
|  | 1401 | return 0; | 
|  | 1402 | } | 
|  | 1403 |  | 
|  | 1404 | static void dw_shutdown(struct platform_device *pdev) | 
|  | 1405 | { | 
|  | 1406 | struct dw_dma	*dw = platform_get_drvdata(pdev); | 
|  | 1407 |  | 
|  | 1408 | dw_dma_off(dw); | 
|  | 1409 | clk_disable(dw->clk); | 
|  | 1410 | } | 
|  | 1411 |  | 
| Magnus Damm | 4a256b5 | 2009-07-08 13:22:18 +0200 | [diff] [blame] | 1412 | static int dw_suspend_noirq(struct device *dev) | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1413 | { | 
| Magnus Damm | 4a256b5 | 2009-07-08 13:22:18 +0200 | [diff] [blame] | 1414 | struct platform_device *pdev = to_platform_device(dev); | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1415 | struct dw_dma	*dw = platform_get_drvdata(pdev); | 
|  | 1416 |  | 
|  | 1417 | dw_dma_off(dw); | 
|  | 1418 | clk_disable(dw->clk); | 
|  | 1419 | return 0; | 
|  | 1420 | } | 
|  | 1421 |  | 
| Magnus Damm | 4a256b5 | 2009-07-08 13:22:18 +0200 | [diff] [blame] | 1422 | static int dw_resume_noirq(struct device *dev) | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1423 | { | 
| Magnus Damm | 4a256b5 | 2009-07-08 13:22:18 +0200 | [diff] [blame] | 1424 | struct platform_device *pdev = to_platform_device(dev); | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1425 | struct dw_dma	*dw = platform_get_drvdata(pdev); | 
|  | 1426 |  | 
|  | 1427 | clk_enable(dw->clk); | 
|  | 1428 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 
|  | 1429 | return 0; | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1430 | } | 
|  | 1431 |  | 
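|  |  | /* | 
|  |  |  * The noirq PM hooks above shut the controller down and gate its | 
|  |  |  * clock on suspend, then ungate the clock and re-enable DMA on | 
|  |  |  * resume. | 
|  |  |  */ | 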
| Alexey Dobriyan | 4714521 | 2009-12-14 18:00:08 -0800 | [diff] [blame] | 1432 | static const struct dev_pm_ops dw_dev_pm_ops = { | 
| Magnus Damm | 4a256b5 | 2009-07-08 13:22:18 +0200 | [diff] [blame] | 1433 | .suspend_noirq = dw_suspend_noirq, | 
|  | 1434 | .resume_noirq = dw_resume_noirq, | 
|  | 1435 | }; | 
|  | 1436 |  | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1437 | static struct platform_driver dw_driver = { | 
|  | 1438 | .remove		= __exit_p(dw_remove), | 
|  | 1439 | .shutdown	= dw_shutdown, | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1440 | .driver = { | 
|  | 1441 | .name	= "dw_dmac", | 
| Magnus Damm | 4a256b5 | 2009-07-08 13:22:18 +0200 | [diff] [blame] | 1442 | .pm	= &dw_dev_pm_ops, | 
| Haavard Skinnemoen | 3bfb1d2 | 2008-07-08 11:59:42 -0700 | [diff] [blame] | 1443 | }, | 
|  | 1444 | }; | 
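|  |  |  | 
|  |  | /* | 
|  |  |  * Board-code sketch (illustrative only, not part of this file): the | 
|  |  |  * driver binds to a platform device named "dw_dmac" that provides a | 
|  |  |  * register window, an IRQ, an "hclk" clock and dw_dma_platform_data. | 
|  |  |  * The addresses and IRQ number below are placeholders. | 
|  |  |  * | 
|  |  |  *	static struct resource dw_dmac0_resource[] = { | 
|  |  |  *		{ | 
|  |  |  *			.start	= 0xff200000, | 
|  |  |  *			.end	= 0xff200fff, | 
|  |  |  *			.flags	= IORESOURCE_MEM, | 
|  |  |  *		}, { | 
|  |  |  *			.start	= 2, | 
|  |  |  *			.end	= 2, | 
|  |  |  *			.flags	= IORESOURCE_IRQ, | 
|  |  |  *		}, | 
|  |  |  *	}; | 
|  |  |  * | 
|  |  |  *	static struct dw_dma_platform_data dw_dmac0_pdata = { | 
|  |  |  *		.nr_channels	= 3, | 
|  |  |  *	}; | 
|  |  |  * | 
|  |  |  *	static struct platform_device dw_dmac0_device = { | 
|  |  |  *		.name		= "dw_dmac", | 
|  |  |  *		.id		= 0, | 
|  |  |  *		.resource	= dw_dmac0_resource, | 
|  |  |  *		.num_resources	= ARRAY_SIZE(dw_dmac0_resource), | 
|  |  |  *		.dev = { | 
|  |  |  *			.platform_data = &dw_dmac0_pdata, | 
|  |  |  *		}, | 
|  |  |  *	}; | 
|  |  |  * | 
|  |  |  * The device must be registered (platform_device_register()) before | 
|  |  |  * dw_init() runs, since the driver uses platform_driver_probe(). | 
|  |  |  */ | 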
|  | 1445 |  | 
|  | 1446 | static int __init dw_init(void) | 
|  | 1447 | { | 
|  | 1448 | return platform_driver_probe(&dw_driver, dw_probe); | 
|  | 1449 | } | 
|  | 1450 | module_init(dw_init); | 
|  | 1451 |  | 
|  | 1452 | static void __exit dw_exit(void) | 
|  | 1453 | { | 
|  | 1454 | platform_driver_unregister(&dw_driver); | 
|  | 1455 | } | 
|  | 1456 | module_exit(dw_exit); | 
|  | 1457 |  | 
|  | 1458 | MODULE_LICENSE("GPL v2"); | 
|  | 1459 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 
|  | 1460 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); |