Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems).
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
Viresh Kumaraecb7b62011-05-24 14:04:09 +05306 * Copyright (C) 2010-2011 ST Microelectronics
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07007 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
Viresh Kumar327e6972012-02-01 16:12:26 +053012#include <linux/bitops.h>
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070013#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
Viresh Kumard3f797d2012-04-20 20:15:34 +053020#include <linux/of.h>
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070021#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25
26#include "dw_dmac_regs.h"
Russell King - ARM Linuxd2ebfb32012-03-06 22:34:26 +000027#include "dmaengine.h"
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070028
29/*
30 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
31 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
32 * of which use ARM any more). See the "Databook" from Synopsys for
33 * information beyond what licensees probably provide.
34 *
35 * The driver has currently been tested only with the Atmel AT32AP7000,
36 * which does not support descriptor writeback.
37 */
38
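/*
 * Default CTL_LO value for a channel: with slave data attached, the burst
 * sizes (MSIZE) come from the runtime slave config and the masters from the
 * slave data; otherwise MSIZE_16 and masters 0 (dst) / 1 (src) are used.
 * Linked-list mode is always enabled for both ends.
 */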
Viresh Kumar327e6972012-02-01 16:12:26 +053039#define DWC_DEFAULT_CTLLO(_chan) ({ \
40 struct dw_dma_slave *__slave = (_chan->private); \
41 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
42 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43 int _dms = __slave ? __slave->dst_master : 0; \
44 int _sms = __slave ? __slave->src_master : 1; \
45 u8 _smsize = __slave ? _sconfig->src_maxburst : \
46 DW_DMA_MSIZE_16; \
47 u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
48 DW_DMA_MSIZE_16; \
Jamie Ilesf301c062011-01-21 14:11:53 +000049 \
Viresh Kumar327e6972012-02-01 16:12:26 +053050 (DWC_CTLL_DST_MSIZE(_dmsize) \
51 | DWC_CTLL_SRC_MSIZE(_smsize) \
Jamie Ilesf301c062011-01-21 14:11:53 +000052 | DWC_CTLL_LLP_D_EN \
53 | DWC_CTLL_LLP_S_EN \
Viresh Kumar327e6972012-02-01 16:12:26 +053054 | DWC_CTLL_DMS(_dms) \
55 | DWC_CTLL_SMS(_sms)); \
Jamie Ilesf301c062011-01-21 14:11:53 +000056 })
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070057
58/*
59 * This is configuration-dependent and usually a funny size like 4095.
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070060 *
61 * Note that this is a transfer count, i.e. if we transfer 32-bit
Viresh Kumar418e7402011-03-04 15:42:50 +053062 * words, we can do 16380 bytes per descriptor.
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070063 *
64 * This parameter is also system-specific.
65 */
Viresh Kumar418e7402011-03-04 15:42:50 +053066#define DWC_MAX_COUNT 4095U
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070067
68/*
69 * Number of descriptors to allocate for each channel. This should be
70 * made configurable somehow; preferably, the clients (at least the
71 * ones using slave transfers) should be able to give us a hint.
72 */
73#define NR_DESCS_PER_CHANNEL 64
74
75/*----------------------------------------------------------------------*/
76
77/*
78 * Because we're not relying on writeback from the controller (it may not
79 * even be configured into the core!) we don't need to use dma_pool. These
80 * descriptors -- and associated data -- are cacheable. We do need to make
81 * sure their dcache entries are written back before handing them off to
82 * the controller, though.
83 */
84
Dan Williams41d5e592009-01-06 11:38:21 -070085static struct device *chan2dev(struct dma_chan *chan)
86{
87 return &chan->dev->device;
88}
89static struct device *chan2parent(struct dma_chan *chan)
90{
91 return chan->dev->device.parent;
92}
93
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070094static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
95{
96 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
97}
98
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -070099static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
100{
101 struct dw_desc *desc, *_desc;
102 struct dw_desc *ret = NULL;
103 unsigned int i = 0;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530104 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700105
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530106 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700107 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
108 if (async_tx_test_ack(&desc->txd)) {
109 list_del(&desc->desc_node);
110 ret = desc;
111 break;
112 }
Dan Williams41d5e592009-01-06 11:38:21 -0700113 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700114 i++;
115 }
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530116 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700117
Dan Williams41d5e592009-01-06 11:38:21 -0700118 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700119
120 return ret;
121}
122
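/*
 * Bring the descriptor and all its children back into the CPU's view of
 * memory before reading or recycling them; the LLIs live in ordinary
 * cacheable memory rather than a coherent dma_pool.
 */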
123static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
125 struct dw_desc *child;
126
Dan Williamse0bd0f82009-09-08 17:53:02 -0700127 list_for_each_entry(child, &desc->tx_list, desc_node)
Dan Williams41d5e592009-01-06 11:38:21 -0700128 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700129 child->txd.phys, sizeof(child->lli),
130 DMA_TO_DEVICE);
Dan Williams41d5e592009-01-06 11:38:21 -0700131 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700132 desc->txd.phys, sizeof(desc->lli),
133 DMA_TO_DEVICE);
134}
135
136/*
137 * Move a descriptor, including any children, to the free list.
138 * `desc' must not be on any lists.
139 */
140static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
141{
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530142 unsigned long flags;
143
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700144 if (desc) {
145 struct dw_desc *child;
146
147 dwc_sync_desc_for_cpu(dwc, desc);
148
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530149 spin_lock_irqsave(&dwc->lock, flags);
Dan Williamse0bd0f82009-09-08 17:53:02 -0700150 list_for_each_entry(child, &desc->tx_list, desc_node)
Dan Williams41d5e592009-01-06 11:38:21 -0700151 dev_vdbg(chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700152 "moving child desc %p to freelist\n",
153 child);
Dan Williamse0bd0f82009-09-08 17:53:02 -0700154 list_splice_init(&desc->tx_list, &dwc->free_list);
Dan Williams41d5e592009-01-06 11:38:21 -0700155 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700156 list_add(&desc->desc_node, &dwc->free_list);
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530157 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700158 }
159}
160
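/*
 * One-time channel setup: program CFG_LO/CFG_HI (from the attached slave
 * data when present) and unmask this channel's XFER and ERROR interrupts.
 */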
Viresh Kumar61e183f2011-11-17 16:01:29 +0530161static void dwc_initialize(struct dw_dma_chan *dwc)
162{
163 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
164 struct dw_dma_slave *dws = dwc->chan.private;
165 u32 cfghi = DWC_CFGH_FIFO_MODE;
166 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
167
168 if (dwc->initialized == true)
169 return;
170
171 if (dws) {
172 /*
173 * We need controller-specific data to set up slave
174 * transfers.
175 */
176 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
177
178 cfghi = dws->cfg_hi;
179 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
180 }
181
182 channel_writel(dwc, CFG_LO, cfglo);
183 channel_writel(dwc, CFG_HI, cfghi);
184
185 /* Enable interrupts */
186 channel_set_bit(dw, MASK.XFER, dwc->mask);
Viresh Kumar61e183f2011-11-17 16:01:29 +0530187 channel_set_bit(dw, MASK.ERROR, dwc->mask);
188
189 dwc->initialized = true;
190}
191
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700192/*----------------------------------------------------------------------*/
193
194/* Called with dwc->lock held and bh disabled */
195static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
196{
197 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
198
199 /* ASSERT: channel is idle */
200 if (dma_readl(dw, CH_EN) & dwc->mask) {
Dan Williams41d5e592009-01-06 11:38:21 -0700201 dev_err(chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700202 "BUG: Attempted to start non-idle channel\n");
Dan Williams41d5e592009-01-06 11:38:21 -0700203 dev_err(chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700204 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
205 channel_readl(dwc, SAR),
206 channel_readl(dwc, DAR),
207 channel_readl(dwc, LLP),
208 channel_readl(dwc, CTL_HI),
209 channel_readl(dwc, CTL_LO));
210
211 /* The tasklet will hopefully advance the queue... */
212 return;
213 }
214
Viresh Kumar61e183f2011-11-17 16:01:29 +0530215 dwc_initialize(dwc);
216
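	/*
	 * Point the channel at the first hardware descriptor and enable
	 * linked-list mode for source and destination; the per-block
	 * parameters are taken from the LLIs themselves.
	 */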
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700217 channel_writel(dwc, LLP, first->txd.phys);
218 channel_writel(dwc, CTL_LO,
219 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
220 channel_writel(dwc, CTL_HI, 0);
221 channel_set_bit(dw, CH_EN, dwc->mask);
222}
223
224/*----------------------------------------------------------------------*/
225
226static void
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530227dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
228 bool callback_required)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700229{
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530230 dma_async_tx_callback callback = NULL;
231 void *param = NULL;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700232 struct dma_async_tx_descriptor *txd = &desc->txd;
Viresh Kumare5180762011-03-03 15:47:20 +0530233 struct dw_desc *child;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530234 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700235
Dan Williams41d5e592009-01-06 11:38:21 -0700236 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700237
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530238 spin_lock_irqsave(&dwc->lock, flags);
Russell King - ARM Linuxf7fbce02012-03-06 22:35:07 +0000239 dma_cookie_complete(txd);
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530240 if (callback_required) {
241 callback = txd->callback;
242 param = txd->callback_param;
243 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700244
245 dwc_sync_desc_for_cpu(dwc, desc);
Viresh Kumare5180762011-03-03 15:47:20 +0530246
247 /* async_tx_ack */
248 list_for_each_entry(child, &desc->tx_list, desc_node)
249 async_tx_ack(&child->txd);
250 async_tx_ack(&desc->txd);
251
Dan Williamse0bd0f82009-09-08 17:53:02 -0700252 list_splice_init(&desc->tx_list, &dwc->free_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700253 list_move(&desc->desc_node, &dwc->free_list);
254
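	/*
	 * Only memcpy (non-slave) transfers get their buffers unmapped here;
	 * the unmap type and direction follow the descriptor's txd flags.
	 */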
Atsushi Nemoto657a77f2009-09-08 17:53:05 -0700255 if (!dwc->chan.private) {
256 struct device *parent = chan2parent(&dwc->chan);
257 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
258 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
259 dma_unmap_single(parent, desc->lli.dar,
260 desc->len, DMA_FROM_DEVICE);
261 else
262 dma_unmap_page(parent, desc->lli.dar,
263 desc->len, DMA_FROM_DEVICE);
264 }
265 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
266 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
267 dma_unmap_single(parent, desc->lli.sar,
268 desc->len, DMA_TO_DEVICE);
269 else
270 dma_unmap_page(parent, desc->lli.sar,
271 desc->len, DMA_TO_DEVICE);
272 }
273 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700274
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530275 spin_unlock_irqrestore(&dwc->lock, flags);
276
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530277 if (callback_required && callback)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700278 callback(param);
279}
280
281static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
282{
283 struct dw_desc *desc, *_desc;
284 LIST_HEAD(list);
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530285 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700286
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530287 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700288 if (dma_readl(dw, CH_EN) & dwc->mask) {
Dan Williams41d5e592009-01-06 11:38:21 -0700289 dev_err(chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700290 "BUG: XFER bit set, but channel not idle!\n");
291
292 /* Try to continue after resetting the channel... */
293 channel_clear_bit(dw, CH_EN, dwc->mask);
294 while (dma_readl(dw, CH_EN) & dwc->mask)
295 cpu_relax();
296 }
297
298 /*
299 * Submit queued descriptors ASAP, i.e. before we go through
300 * the completed ones.
301 */
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700302 list_splice_init(&dwc->active_list, &list);
Viresh Kumarf336e422011-03-03 15:47:16 +0530303 if (!list_empty(&dwc->queue)) {
304 list_move(dwc->queue.next, &dwc->active_list);
305 dwc_dostart(dwc, dwc_first_active(dwc));
306 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700307
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530308 spin_unlock_irqrestore(&dwc->lock, flags);
309
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700310 list_for_each_entry_safe(desc, _desc, &list, desc_node)
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530311 dwc_descriptor_complete(dwc, desc, true);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700312}
313
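/*
 * Complete every descriptor the hardware has finished with, using the
 * current LLP register value to identify the one still in flight.
 */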
314static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
315{
316 dma_addr_t llp;
317 struct dw_desc *desc, *_desc;
318 struct dw_desc *child;
319 u32 status_xfer;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530320 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700321
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530322 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700323 llp = channel_readl(dwc, LLP);
324 status_xfer = dma_readl(dw, RAW.XFER);
325
326 if (status_xfer & dwc->mask) {
327 /* Everything we've submitted is done */
328 dma_writel(dw, CLEAR.XFER, dwc->mask);
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530329 spin_unlock_irqrestore(&dwc->lock, flags);
330
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700331 dwc_complete_all(dw, dwc);
332 return;
333 }
334
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530335 if (list_empty(&dwc->active_list)) {
336 spin_unlock_irqrestore(&dwc->lock, flags);
Jamie Iles087809f2011-01-21 14:11:52 +0000337 return;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530338 }
Jamie Iles087809f2011-01-21 14:11:52 +0000339
Andy Shevchenko2f45d612012-06-19 13:34:02 +0300340 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%llx\n",
341 (unsigned long long)llp);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700342
343 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
Viresh Kumar84adccf2011-03-24 11:32:15 +0530344 /* check first descriptor's address */
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530345 if (desc->txd.phys == llp) {
346 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700347 return;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530348 }
Viresh Kumar84adccf2011-03-24 11:32:15 +0530349
350 /* check first descriptor's llp */
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530351 if (desc->lli.llp == llp) {
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700352 /* This one is currently in progress */
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530353 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700354 return;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530355 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700356
Dan Williamse0bd0f82009-09-08 17:53:02 -0700357 list_for_each_entry(child, &desc->tx_list, desc_node)
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530358 if (child->lli.llp == llp) {
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700359 /* Currently in progress */
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530360 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700361 return;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530362 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700363
364 /*
365 * No descriptors so far seem to be in progress, i.e.
366 * this one must be done.
367 */
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530368 spin_unlock_irqrestore(&dwc->lock, flags);
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530369 dwc_descriptor_complete(dwc, desc, true);
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530370 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700371 }
372
Dan Williams41d5e592009-01-06 11:38:21 -0700373 dev_err(chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700374 "BUG: All descriptors done, but channel not idle!\n");
375
376 /* Try to continue after resetting the channel... */
377 channel_clear_bit(dw, CH_EN, dwc->mask);
378 while (dma_readl(dw, CH_EN) & dwc->mask)
379 cpu_relax();
380
381 if (!list_empty(&dwc->queue)) {
Viresh Kumarf336e422011-03-03 15:47:16 +0530382 list_move(dwc->queue.next, &dwc->active_list);
383 dwc_dostart(dwc, dwc_first_active(dwc));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700384 }
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530385 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700386}
387
388static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
389{
Dan Williams41d5e592009-01-06 11:38:21 -0700390 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
Andy Shevchenko2f45d612012-06-19 13:34:02 +0300391 " desc: s0x%llx d0x%llx l0x%llx c0x%x:%x\n",
392 (unsigned long long)lli->sar,
393 (unsigned long long)lli->dar,
394 (unsigned long long)lli->llp,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700395 lli->ctlhi, lli->ctllo);
396}
397
398static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
399{
400 struct dw_desc *bad_desc;
401 struct dw_desc *child;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530402 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700403
404 dwc_scan_descriptors(dw, dwc);
405
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530406 spin_lock_irqsave(&dwc->lock, flags);
407
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700408 /*
409 * The descriptor currently at the head of the active list is
410 * borked. Since we don't have any way to report errors, we'll
411 * just have to scream loudly and try to carry on.
412 */
413 bad_desc = dwc_first_active(dwc);
414 list_del_init(&bad_desc->desc_node);
Viresh Kumarf336e422011-03-03 15:47:16 +0530415 list_move(dwc->queue.next, dwc->active_list.prev);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700416
417 /* Clear the error flag and try to restart the controller */
418 dma_writel(dw, CLEAR.ERROR, dwc->mask);
419 if (!list_empty(&dwc->active_list))
420 dwc_dostart(dwc, dwc_first_active(dwc));
421
422 /*
423 * KERN_CRIT may seem harsh, but since this only happens
424 * when someone submits a bad physical address in a
425 * descriptor, we should consider ourselves lucky that the
426 * controller flagged an error instead of scribbling over
427 * random memory locations.
428 */
Dan Williams41d5e592009-01-06 11:38:21 -0700429 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700430 "Bad descriptor submitted for DMA!\n");
Dan Williams41d5e592009-01-06 11:38:21 -0700431 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700432 " cookie: %d\n", bad_desc->txd.cookie);
433 dwc_dump_lli(dwc, &bad_desc->lli);
Dan Williamse0bd0f82009-09-08 17:53:02 -0700434 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700435 dwc_dump_lli(dwc, &child->lli);
436
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530437 spin_unlock_irqrestore(&dwc->lock, flags);
438
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700439 /* Pretend the descriptor completed successfully */
Viresh Kumar5fedefb2011-04-15 16:03:35 +0530440 dwc_descriptor_complete(dwc, bad_desc, true);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700441}
442
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200443/* --------------------- Cyclic DMA API extensions -------------------- */
444
445inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
446{
447 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
448 return channel_readl(dwc, SAR);
449}
450EXPORT_SYMBOL(dw_dma_get_src_addr);
451
452inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
453{
454 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
455 return channel_readl(dwc, DAR);
456}
457EXPORT_SYMBOL(dw_dma_get_dst_addr);
458
459/* called with dwc->lock held and all DMAC interrupts disabled */
460static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530461 u32 status_err, u32 status_xfer)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200462{
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530463 unsigned long flags;
464
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530465 if (dwc->mask) {
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200466 void (*callback)(void *param);
467 void *callback_param;
468
469 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
470 channel_readl(dwc, LLP));
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200471
472 callback = dwc->cdesc->period_callback;
473 callback_param = dwc->cdesc->period_callback_param;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530474
475 if (callback)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200476 callback(callback_param);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200477 }
478
479 /*
480 * Error and transfer complete are highly unlikely, and will most
481 * likely be due to a configuration error by the user.
482 */
483 if (unlikely(status_err & dwc->mask) ||
484 unlikely(status_xfer & dwc->mask)) {
485 int i;
486
487 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
488 "interrupt, stopping DMA transfer\n",
489 status_xfer ? "xfer" : "error");
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530490
491 spin_lock_irqsave(&dwc->lock, flags);
492
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200493 dev_err(chan2dev(&dwc->chan),
494 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
495 channel_readl(dwc, SAR),
496 channel_readl(dwc, DAR),
497 channel_readl(dwc, LLP),
498 channel_readl(dwc, CTL_HI),
499 channel_readl(dwc, CTL_LO));
500
501 channel_clear_bit(dw, CH_EN, dwc->mask);
502 while (dma_readl(dw, CH_EN) & dwc->mask)
503 cpu_relax();
504
505 /* make sure DMA does not restart by loading a new list */
506 channel_writel(dwc, LLP, 0);
507 channel_writel(dwc, CTL_LO, 0);
508 channel_writel(dwc, CTL_HI, 0);
509
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200510 dma_writel(dw, CLEAR.ERROR, dwc->mask);
511 dma_writel(dw, CLEAR.XFER, dwc->mask);
512
513 for (i = 0; i < dwc->cdesc->periods; i++)
514 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530515
516 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200517 }
518}
519
520/* ------------------------------------------------------------------------- */
521
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700522static void dw_dma_tasklet(unsigned long data)
523{
524 struct dw_dma *dw = (struct dw_dma *)data;
525 struct dw_dma_chan *dwc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700526 u32 status_xfer;
527 u32 status_err;
528 int i;
529
Haavard Skinnemoen7fe7b2f2008-10-03 15:23:46 -0700530 status_xfer = dma_readl(dw, RAW.XFER);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700531 status_err = dma_readl(dw, RAW.ERROR);
532
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530533 dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700534
535 for (i = 0; i < dw->dma.chancnt; i++) {
536 dwc = &dw->chan[i];
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200537 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530538 dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200539 else if (status_err & (1 << i))
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700540 dwc_handle_error(dw, dwc);
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530541 else if (status_xfer & (1 << i))
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700542 dwc_scan_descriptors(dw, dwc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700543 }
544
545 /*
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530546 * Re-enable interrupts.
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700547 */
548 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700549 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
550}
551
552static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
553{
554 struct dw_dma *dw = dev_id;
555 u32 status;
556
557 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
558 dma_readl(dw, STATUS_INT));
559
560 /*
561 * Just disable the interrupts. We'll turn them back on in the
562 * softirq handler.
563 */
564 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700565 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
566
567 status = dma_readl(dw, STATUS_INT);
568 if (status) {
569 dev_err(dw->dma.dev,
570 "BUG: Unexpected interrupts pending: 0x%x\n",
571 status);
572
573 /* Try to recover */
574 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700575 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
576 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
577 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
578 }
579
580 tasklet_schedule(&dw->tasklet);
581
582 return IRQ_HANDLED;
583}
584
585/*----------------------------------------------------------------------*/
586
587static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
588{
589 struct dw_desc *desc = txd_to_dw_desc(tx);
590 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
591 dma_cookie_t cookie;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530592 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700593
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530594 spin_lock_irqsave(&dwc->lock, flags);
Russell King - ARM Linux884485e2012-03-06 22:34:46 +0000595 cookie = dma_cookie_assign(tx);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700596
597 /*
598 * REVISIT: We should attempt to chain as many descriptors as
599 * possible, perhaps even appending to those already submitted
600 * for DMA. But this is hard to do in a race-free manner.
601 */
602 if (list_empty(&dwc->active_list)) {
Dan Williams41d5e592009-01-06 11:38:21 -0700603 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700604 desc->txd.cookie);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700605 list_add_tail(&desc->desc_node, &dwc->active_list);
Viresh Kumarf336e422011-03-03 15:47:16 +0530606 dwc_dostart(dwc, dwc_first_active(dwc));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700607 } else {
Dan Williams41d5e592009-01-06 11:38:21 -0700608 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700609 desc->txd.cookie);
610
611 list_add_tail(&desc->desc_node, &dwc->queue);
612 }
613
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530614 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700615
616 return cookie;
617}
618
619static struct dma_async_tx_descriptor *
620dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
621 size_t len, unsigned long flags)
622{
623 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
624 struct dw_desc *desc;
625 struct dw_desc *first;
626 struct dw_desc *prev;
627 size_t xfer_count;
628 size_t offset;
629 unsigned int src_width;
630 unsigned int dst_width;
631 u32 ctllo;
632
Andy Shevchenko2f45d612012-06-19 13:34:02 +0300633 dev_vdbg(chan2dev(chan),
634 "prep_dma_memcpy d0x%llx s0x%llx l0x%zx f0x%lx\n",
635 (unsigned long long)dest, (unsigned long long)src,
636 len, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700637
638 if (unlikely(!len)) {
Dan Williams41d5e592009-01-06 11:38:21 -0700639 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700640 return NULL;
641 }
642
643 /*
644 * We can be a lot more clever here, but this should take care
645 * of the most common optimization.
646 */
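	/* Use the widest transfer width that src, dest and len are all aligned to. */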
Viresh Kumara0227452011-03-03 15:47:18 +0530647 if (!((src | dest | len) & 7))
648 src_width = dst_width = 3;
649 else if (!((src | dest | len) & 3))
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700650 src_width = dst_width = 2;
651 else if (!((src | dest | len) & 1))
652 src_width = dst_width = 1;
653 else
654 src_width = dst_width = 0;
655
Viresh Kumar327e6972012-02-01 16:12:26 +0530656 ctllo = DWC_DEFAULT_CTLLO(chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700657 | DWC_CTLL_DST_WIDTH(dst_width)
658 | DWC_CTLL_SRC_WIDTH(src_width)
659 | DWC_CTLL_DST_INC
660 | DWC_CTLL_SRC_INC
661 | DWC_CTLL_FC_M2M;
662 prev = first = NULL;
663
664 for (offset = 0; offset < len; offset += xfer_count << src_width) {
665 xfer_count = min_t(size_t, (len - offset) >> src_width,
666 DWC_MAX_COUNT);
667
668 desc = dwc_desc_get(dwc);
669 if (!desc)
670 goto err_desc_get;
671
672 desc->lli.sar = src + offset;
673 desc->lli.dar = dest + offset;
674 desc->lli.ctllo = ctllo;
675 desc->lli.ctlhi = xfer_count;
676
677 if (!first) {
678 first = desc;
679 } else {
680 prev->lli.llp = desc->txd.phys;
Dan Williams41d5e592009-01-06 11:38:21 -0700681 dma_sync_single_for_device(chan2parent(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700682 prev->txd.phys, sizeof(prev->lli),
683 DMA_TO_DEVICE);
684 list_add_tail(&desc->desc_node,
Dan Williamse0bd0f82009-09-08 17:53:02 -0700685 &first->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700686 }
687 prev = desc;
688 }
689
690
691 if (flags & DMA_PREP_INTERRUPT)
692 /* Trigger interrupt after last block */
693 prev->lli.ctllo |= DWC_CTLL_INT_EN;
694
695 prev->lli.llp = 0;
Dan Williams41d5e592009-01-06 11:38:21 -0700696 dma_sync_single_for_device(chan2parent(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700697 prev->txd.phys, sizeof(prev->lli),
698 DMA_TO_DEVICE);
699
700 first->txd.flags = flags;
701 first->len = len;
702
703 return &first->txd;
704
705err_desc_get:
706 dwc_desc_put(dwc, first);
707 return NULL;
708}
709
710static struct dma_async_tx_descriptor *
711dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
Vinod Kouldb8196d2011-10-13 22:34:23 +0530712 unsigned int sg_len, enum dma_transfer_direction direction,
Alexandre Bounine185ecb52012-03-08 15:35:13 -0500713 unsigned long flags, void *context)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700714{
715 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Dan Williams287d8592009-02-18 14:48:26 -0800716 struct dw_dma_slave *dws = chan->private;
Viresh Kumar327e6972012-02-01 16:12:26 +0530717 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700718 struct dw_desc *prev;
719 struct dw_desc *first;
720 u32 ctllo;
721 dma_addr_t reg;
722 unsigned int reg_width;
723 unsigned int mem_width;
724 unsigned int i;
725 struct scatterlist *sg;
726 size_t total_len = 0;
727
Dan Williams41d5e592009-01-06 11:38:21 -0700728 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700729
730 if (unlikely(!dws || !sg_len))
731 return NULL;
732
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700733 prev = first = NULL;
734
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700735 switch (direction) {
Vinod Kouldb8196d2011-10-13 22:34:23 +0530736 case DMA_MEM_TO_DEV:
Viresh Kumar327e6972012-02-01 16:12:26 +0530737 reg_width = __fls(sconfig->dst_addr_width);
738 reg = sconfig->dst_addr;
739 ctllo = (DWC_DEFAULT_CTLLO(chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700740 | DWC_CTLL_DST_WIDTH(reg_width)
741 | DWC_CTLL_DST_FIX
Viresh Kumar327e6972012-02-01 16:12:26 +0530742 | DWC_CTLL_SRC_INC);
743
744 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
745 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
746
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700747 for_each_sg(sgl, sg, sg_len, i) {
748 struct dw_desc *desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530749 u32 len, dlen, mem;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700750
Lars-Peter Clausencbb796c2012-04-25 20:50:51 +0200751 mem = sg_dma_address(sg);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700752 len = sg_dma_len(sg);
Viresh Kumar6bc711f2012-02-01 16:12:25 +0530753
754 if (!((mem | len) & 7))
755 mem_width = 3;
756 else if (!((mem | len) & 3))
757 mem_width = 2;
758 else if (!((mem | len) & 1))
759 mem_width = 1;
760 else
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700761 mem_width = 0;
762
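			/*
			 * Entries longer than the hardware maximum are split
			 * across several descriptors; loop back here until the
			 * whole scatterlist entry is covered.
			 */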
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530763slave_sg_todev_fill_desc:
764 desc = dwc_desc_get(dwc);
765 if (!desc) {
766 dev_err(chan2dev(chan),
767 "not enough descriptors available\n");
768 goto err_desc_get;
769 }
770
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700771 desc->lli.sar = mem;
772 desc->lli.dar = reg;
773 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530774 if ((len >> mem_width) > DWC_MAX_COUNT) {
775 dlen = DWC_MAX_COUNT << mem_width;
776 mem += dlen;
777 len -= dlen;
778 } else {
779 dlen = len;
780 len = 0;
781 }
782
783 desc->lli.ctlhi = dlen >> mem_width;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700784
785 if (!first) {
786 first = desc;
787 } else {
788 prev->lli.llp = desc->txd.phys;
Dan Williams41d5e592009-01-06 11:38:21 -0700789 dma_sync_single_for_device(chan2parent(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700790 prev->txd.phys,
791 sizeof(prev->lli),
792 DMA_TO_DEVICE);
793 list_add_tail(&desc->desc_node,
Dan Williamse0bd0f82009-09-08 17:53:02 -0700794 &first->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700795 }
796 prev = desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530797 total_len += dlen;
798
799 if (len)
800 goto slave_sg_todev_fill_desc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700801 }
802 break;
Vinod Kouldb8196d2011-10-13 22:34:23 +0530803 case DMA_DEV_TO_MEM:
Viresh Kumar327e6972012-02-01 16:12:26 +0530804 reg_width = __fls(sconfig->src_addr_width);
805 reg = sconfig->src_addr;
806 ctllo = (DWC_DEFAULT_CTLLO(chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700807 | DWC_CTLL_SRC_WIDTH(reg_width)
808 | DWC_CTLL_DST_INC
Viresh Kumar327e6972012-02-01 16:12:26 +0530809 | DWC_CTLL_SRC_FIX);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700810
Viresh Kumar327e6972012-02-01 16:12:26 +0530811 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
812 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
813
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700814 for_each_sg(sgl, sg, sg_len, i) {
815 struct dw_desc *desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530816 u32 len, dlen, mem;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700817
Lars-Peter Clausencbb796c2012-04-25 20:50:51 +0200818 mem = sg_dma_address(sg);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700819 len = sg_dma_len(sg);
Viresh Kumar6bc711f2012-02-01 16:12:25 +0530820
821 if (!((mem | len) & 7))
822 mem_width = 3;
823 else if (!((mem | len) & 3))
824 mem_width = 2;
825 else if (!((mem | len) & 1))
826 mem_width = 1;
827 else
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700828 mem_width = 0;
829
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530830slave_sg_fromdev_fill_desc:
831 desc = dwc_desc_get(dwc);
832 if (!desc) {
833 dev_err(chan2dev(chan),
834 "not enough descriptors available\n");
835 goto err_desc_get;
836 }
837
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700838 desc->lli.sar = reg;
839 desc->lli.dar = mem;
840 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530841 if ((len >> reg_width) > DWC_MAX_COUNT) {
842 dlen = DWC_MAX_COUNT << reg_width;
843 mem += dlen;
844 len -= dlen;
845 } else {
846 dlen = len;
847 len = 0;
848 }
849 desc->lli.ctlhi = dlen >> reg_width;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700850
851 if (!first) {
852 first = desc;
853 } else {
854 prev->lli.llp = desc->txd.phys;
Dan Williams41d5e592009-01-06 11:38:21 -0700855 dma_sync_single_for_device(chan2parent(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700856 prev->txd.phys,
857 sizeof(prev->lli),
858 DMA_TO_DEVICE);
859 list_add_tail(&desc->desc_node,
Dan Williamse0bd0f82009-09-08 17:53:02 -0700860 &first->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700861 }
862 prev = desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530863 total_len += dlen;
864
865 if (len)
866 goto slave_sg_fromdev_fill_desc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700867 }
868 break;
869 default:
870 return NULL;
871 }
872
873 if (flags & DMA_PREP_INTERRUPT)
874 /* Trigger interrupt after last block */
875 prev->lli.ctllo |= DWC_CTLL_INT_EN;
876
877 prev->lli.llp = 0;
Dan Williams41d5e592009-01-06 11:38:21 -0700878 dma_sync_single_for_device(chan2parent(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700879 prev->txd.phys, sizeof(prev->lli),
880 DMA_TO_DEVICE);
881
882 first->len = total_len;
883
884 return &first->txd;
885
886err_desc_get:
887 dwc_desc_put(dwc, first);
888 return NULL;
889}
890
Viresh Kumar327e6972012-02-01 16:12:26 +0530891/*
892 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
893 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
894 *
895 * NOTE: burst size 2 is not supported by controller.
896 *
897 * This is done by finding the most significant bit set: fls(n) - 2.
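 * e.g. *maxburst == 8: fls(8) - 2 == 2, the MSIZE encoding for an 8-beat burst.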
898 */
899static inline void convert_burst(u32 *maxburst)
900{
901 if (*maxburst > 1)
902 *maxburst = fls(*maxburst) - 2;
903 else
904 *maxburst = 0;
905}
906
907static int
908set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
909{
910 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
911
912 /* Check if this channel is configured for slave transfers */
913 if (!chan->private)
914 return -EINVAL;
915
916 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
917
918 convert_burst(&dwc->dma_sconfig.src_maxburst);
919 convert_burst(&dwc->dma_sconfig.dst_maxburst);
920
921 return 0;
922}
923
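/*
 * Runtime channel control: DMA_PAUSE/DMA_RESUME toggle CH_SUSP in CFG_LO,
 * DMA_TERMINATE_ALL disables the channel and completes all active and queued
 * descriptors without invoking their callbacks, and DMA_SLAVE_CONFIG stores a
 * new runtime configuration.
 */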
Linus Walleij05827632010-05-17 16:30:42 -0700924static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
925 unsigned long arg)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700926{
927 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
928 struct dw_dma *dw = to_dw_dma(chan->device);
929 struct dw_desc *desc, *_desc;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530930 unsigned long flags;
Linus Walleija7c57cf2011-04-19 08:31:32 +0800931 u32 cfglo;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700932 LIST_HEAD(list);
933
Linus Walleija7c57cf2011-04-19 08:31:32 +0800934 if (cmd == DMA_PAUSE) {
935 spin_lock_irqsave(&dwc->lock, flags);
936
937 cfglo = channel_readl(dwc, CFG_LO);
938 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
939 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
940 cpu_relax();
941
942 dwc->paused = true;
943 spin_unlock_irqrestore(&dwc->lock, flags);
944 } else if (cmd == DMA_RESUME) {
945 if (!dwc->paused)
946 return 0;
947
948 spin_lock_irqsave(&dwc->lock, flags);
949
950 cfglo = channel_readl(dwc, CFG_LO);
951 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
952 dwc->paused = false;
953
954 spin_unlock_irqrestore(&dwc->lock, flags);
955 } else if (cmd == DMA_TERMINATE_ALL) {
956 spin_lock_irqsave(&dwc->lock, flags);
957
958 channel_clear_bit(dw, CH_EN, dwc->mask);
959 while (dma_readl(dw, CH_EN) & dwc->mask)
960 cpu_relax();
961
962 dwc->paused = false;
963
964 /* active_list entries will end up before queued entries */
965 list_splice_init(&dwc->queue, &list);
966 list_splice_init(&dwc->active_list, &list);
967
968 spin_unlock_irqrestore(&dwc->lock, flags);
969
970 /* Flush all pending and queued descriptors */
971 list_for_each_entry_safe(desc, _desc, &list, desc_node)
972 dwc_descriptor_complete(dwc, desc, false);
Viresh Kumar327e6972012-02-01 16:12:26 +0530973 } else if (cmd == DMA_SLAVE_CONFIG) {
974 return set_runtime_config(chan, (struct dma_slave_config *)arg);
975 } else {
Linus Walleijc3635c72010-03-26 16:44:01 -0700976 return -ENXIO;
Viresh Kumar327e6972012-02-01 16:12:26 +0530977 }
Linus Walleijc3635c72010-03-26 16:44:01 -0700978
Linus Walleijc3635c72010-03-26 16:44:01 -0700979 return 0;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700980}
981
982static enum dma_status
Linus Walleij07934482010-03-26 16:50:49 -0700983dwc_tx_status(struct dma_chan *chan,
984 dma_cookie_t cookie,
985 struct dma_tx_state *txstate)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700986{
987 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +0000988 enum dma_status ret;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700989
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +0000990 ret = dma_cookie_status(chan, cookie, txstate);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700991 if (ret != DMA_SUCCESS) {
992 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
993
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +0000994 ret = dma_cookie_status(chan, cookie, txstate);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700995 }
996
Viresh Kumarabf53902011-04-15 16:03:35 +0530997 if (ret != DMA_SUCCESS)
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +0000998 dma_set_residue(txstate, dwc_first_active(dwc)->len);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700999
Linus Walleija7c57cf2011-04-19 08:31:32 +08001000 if (dwc->paused)
1001 return DMA_PAUSED;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001002
1003 return ret;
1004}
1005
1006static void dwc_issue_pending(struct dma_chan *chan)
1007{
1008 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1009
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001010 if (!list_empty(&dwc->queue))
1011 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001012}
1013
Dan Williamsaa1e6f12009-01-06 11:38:17 -07001014static int dwc_alloc_chan_resources(struct dma_chan *chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001015{
1016 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1017 struct dw_dma *dw = to_dw_dma(chan->device);
1018 struct dw_desc *desc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001019 int i;
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301020 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001021
Dan Williams41d5e592009-01-06 11:38:21 -07001022 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001023
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001024 /* ASSERT: channel is idle */
1025 if (dma_readl(dw, CH_EN) & dwc->mask) {
Dan Williams41d5e592009-01-06 11:38:21 -07001026 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001027 return -EIO;
1028 }
1029
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00001030 dma_cookie_init(chan);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001031
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001032 /*
1033 * NOTE: some controllers may have additional features that we
1034 * need to initialize here, like "scatter-gather" (which
1035 * doesn't mean what you think it means), and status writeback.
1036 */
1037
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301038 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001039 i = dwc->descs_allocated;
1040 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301041 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001042
1043 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
1044 if (!desc) {
Dan Williams41d5e592009-01-06 11:38:21 -07001045 dev_info(chan2dev(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001046 "only allocated %d descriptors\n", i);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301047 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001048 break;
1049 }
1050
Dan Williamse0bd0f82009-09-08 17:53:02 -07001051 INIT_LIST_HEAD(&desc->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001052 dma_async_tx_descriptor_init(&desc->txd, chan);
1053 desc->txd.tx_submit = dwc_tx_submit;
1054 desc->txd.flags = DMA_CTRL_ACK;
Dan Williams41d5e592009-01-06 11:38:21 -07001055 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001056 sizeof(desc->lli), DMA_TO_DEVICE);
1057 dwc_desc_put(dwc, desc);
1058
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301059 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001060 i = ++dwc->descs_allocated;
1061 }
1062
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301063 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001064
Dan Williams41d5e592009-01-06 11:38:21 -07001065 dev_dbg(chan2dev(chan),
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001066 "alloc_chan_resources allocated %d descriptors\n", i);
1067
1068 return i;
1069}
1070
1071static void dwc_free_chan_resources(struct dma_chan *chan)
1072{
1073 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1074 struct dw_dma *dw = to_dw_dma(chan->device);
1075 struct dw_desc *desc, *_desc;
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301076 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001077 LIST_HEAD(list);
1078
Dan Williams41d5e592009-01-06 11:38:21 -07001079 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001080 dwc->descs_allocated);
1081
1082 /* ASSERT: channel is idle */
1083 BUG_ON(!list_empty(&dwc->active_list));
1084 BUG_ON(!list_empty(&dwc->queue));
1085 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1086
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301087 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001088 list_splice_init(&dwc->free_list, &list);
1089 dwc->descs_allocated = 0;
Viresh Kumar61e183f2011-11-17 16:01:29 +05301090 dwc->initialized = false;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001091
1092 /* Disable interrupts */
1093 channel_clear_bit(dw, MASK.XFER, dwc->mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001094 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1095
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301096 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001097
1098 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
Dan Williams41d5e592009-01-06 11:38:21 -07001099 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1100 dma_unmap_single(chan2parent(chan), desc->txd.phys,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001101 sizeof(desc->lli), DMA_TO_DEVICE);
1102 kfree(desc);
1103 }
1104
Dan Williams41d5e592009-01-06 11:38:21 -07001105 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001106}
1107
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001108/* --------------------- Cyclic DMA API extensions -------------------- */
1109
1110/**
1111 * dw_dma_cyclic_start - start the cyclic DMA transfer
1112 * @chan: the DMA channel to start
1113 *
1114 * Must be called with soft interrupts disabled. Returns zero on success or
1115 * -errno on failure.
1116 */
1117int dw_dma_cyclic_start(struct dma_chan *chan)
1118{
1119 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1120 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301121 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001122
1123 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1124 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1125 return -ENODEV;
1126 }
1127
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301128 spin_lock_irqsave(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001129
1130 /* assert channel is idle */
1131 if (dma_readl(dw, CH_EN) & dwc->mask) {
1132 dev_err(chan2dev(&dwc->chan),
1133 "BUG: Attempted to start non-idle channel\n");
1134 dev_err(chan2dev(&dwc->chan),
1135 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
1136 channel_readl(dwc, SAR),
1137 channel_readl(dwc, DAR),
1138 channel_readl(dwc, LLP),
1139 channel_readl(dwc, CTL_HI),
1140 channel_readl(dwc, CTL_LO));
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301141 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001142 return -EBUSY;
1143 }
1144
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001145 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1146 dma_writel(dw, CLEAR.XFER, dwc->mask);
1147
1148 /* setup DMAC channel registers */
1149 channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1150 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1151 channel_writel(dwc, CTL_HI, 0);
1152
1153 channel_set_bit(dw, CH_EN, dwc->mask);
1154
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301155 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001156
1157 return 0;
1158}
1159EXPORT_SYMBOL(dw_dma_cyclic_start);
1160
1161/**
1162 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1163 * @chan: the DMA channel to stop
1164 *
1165 * Must be called with soft interrupts disabled.
1166 */
1167void dw_dma_cyclic_stop(struct dma_chan *chan)
1168{
1169 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1170 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301171 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001172
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301173 spin_lock_irqsave(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001174
1175 channel_clear_bit(dw, CH_EN, dwc->mask);
1176 while (dma_readl(dw, CH_EN) & dwc->mask)
1177 cpu_relax();
1178
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301179 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001180}
1181EXPORT_SYMBOL(dw_dma_cyclic_stop);
1182
1183/**
1184 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1185 * @chan: the DMA channel to prepare
1186 * @buf_addr: physical DMA address where the buffer starts
1187 * @buf_len: total number of bytes for the entire buffer
1188 * @period_len: number of bytes for each period
1189 * @direction: transfer direction, to or from device
1190 *
1191 * Must be called before trying to start the transfer. Returns a valid struct
1192 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1193 */
1194struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1195 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
Vinod Kouldb8196d2011-10-13 22:34:23 +05301196 enum dma_transfer_direction direction)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001197{
1198 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Viresh Kumar327e6972012-02-01 16:12:26 +05301199 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001200 struct dw_cyclic_desc *cdesc;
1201 struct dw_cyclic_desc *retval = NULL;
1202 struct dw_desc *desc;
1203 struct dw_desc *last = NULL;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001204 unsigned long was_cyclic;
1205 unsigned int reg_width;
1206 unsigned int periods;
1207 unsigned int i;
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301208 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001209
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301210 spin_lock_irqsave(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001211 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301212 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001213 dev_dbg(chan2dev(&dwc->chan),
1214 "queue and/or active list are not empty\n");
1215 return ERR_PTR(-EBUSY);
1216 }
1217
1218 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301219 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001220 if (was_cyclic) {
1221 dev_dbg(chan2dev(&dwc->chan),
1222 "channel already prepared for cyclic DMA\n");
1223 return ERR_PTR(-EBUSY);
1224 }
1225
1226 retval = ERR_PTR(-EINVAL);
Viresh Kumar327e6972012-02-01 16:12:26 +05301227
1228 if (direction == DMA_MEM_TO_DEV)
1229 reg_width = __ffs(sconfig->dst_addr_width);
1230 else
1231 reg_width = __ffs(sconfig->src_addr_width);
1232
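	/* Each period becomes one descriptor in a circular LLI chain. */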
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001233 periods = buf_len / period_len;
1234
1235 /* Check for too big/unaligned periods and unaligned DMA buffer. */
1236 if (period_len > (DWC_MAX_COUNT << reg_width))
1237 goto out_err;
1238 if (unlikely(period_len & ((1 << reg_width) - 1)))
1239 goto out_err;
1240 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1241 goto out_err;
Vinod Kouldb8196d2011-10-13 22:34:23 +05301242 if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001243 goto out_err;
1244
1245 retval = ERR_PTR(-ENOMEM);
1246
1247 if (periods > NR_DESCS_PER_CHANNEL)
1248 goto out_err;
1249
1250 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1251 if (!cdesc)
1252 goto out_err;
1253
1254 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1255 if (!cdesc->desc)
1256 goto out_err_alloc;
1257
1258 for (i = 0; i < periods; i++) {
1259 desc = dwc_desc_get(dwc);
1260 if (!desc)
1261 goto out_err_desc_get;
1262
1263 switch (direction) {
Vinod Kouldb8196d2011-10-13 22:34:23 +05301264 case DMA_MEM_TO_DEV:
Viresh Kumar327e6972012-02-01 16:12:26 +05301265 desc->lli.dar = sconfig->dst_addr;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001266 desc->lli.sar = buf_addr + (period_len * i);
Viresh Kumar327e6972012-02-01 16:12:26 +05301267 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001268 | DWC_CTLL_DST_WIDTH(reg_width)
1269 | DWC_CTLL_SRC_WIDTH(reg_width)
1270 | DWC_CTLL_DST_FIX
1271 | DWC_CTLL_SRC_INC
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001272 | DWC_CTLL_INT_EN);
Viresh Kumar327e6972012-02-01 16:12:26 +05301273
1274 desc->lli.ctllo |= sconfig->device_fc ?
1275 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1276 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1277
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001278 break;
Vinod Kouldb8196d2011-10-13 22:34:23 +05301279 case DMA_DEV_TO_MEM:
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001280 desc->lli.dar = buf_addr + (period_len * i);
Viresh Kumar327e6972012-02-01 16:12:26 +05301281 desc->lli.sar = sconfig->src_addr;
1282 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001283 | DWC_CTLL_SRC_WIDTH(reg_width)
1284 | DWC_CTLL_DST_WIDTH(reg_width)
1285 | DWC_CTLL_DST_INC
1286 | DWC_CTLL_SRC_FIX
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001287 | DWC_CTLL_INT_EN);
Viresh Kumar327e6972012-02-01 16:12:26 +05301288
1289 desc->lli.ctllo |= sconfig->device_fc ?
1290 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1291 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1292
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001293 break;
1294 default:
1295 break;
1296 }
1297
1298 desc->lli.ctlhi = (period_len >> reg_width);
1299 cdesc->desc[i] = desc;
1300
1301 if (last) {
1302 last->lli.llp = desc->txd.phys;
1303 dma_sync_single_for_device(chan2parent(chan),
1304 last->txd.phys, sizeof(last->lli),
1305 DMA_TO_DEVICE);
1306 }
1307
1308 last = desc;
1309 }
1310
1311 /* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
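
/*
 * A minimal usage sketch, not taken from an in-tree client: it assumes a
 * peripheral driver has already obtained a dw_dmac channel through the
 * dmaengine API and owns a DMA-mapped buffer split into equally sized
 * periods.  my_request_chan(), my_buf_dma, MY_FIFO_ADDR, MY_PERIOD_LEN and
 * MY_PERIODS are hypothetical names; dw_dma_cyclic_start() is the start
 * helper exported by this driver.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= MY_FIFO_ADDR,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *	struct dma_chan *chan = my_request_chan();
 *	struct dw_cyclic_desc *cdesc;
 *	int err;
 *
 *	err = dmaengine_slave_config(chan, &cfg);
 *	if (err)
 *		return err;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, my_buf_dma,
 *				   MY_PERIOD_LEN * MY_PERIODS,
 *				   MY_PERIOD_LEN, DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	err = dw_dma_cyclic_start(chan);
 */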

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
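
/*
 * Teardown sketch matching the preparation example above (same hypothetical
 * caller): the ring is stopped with dw_dma_cyclic_stop(), also exported by
 * this driver, before its descriptors are released.
 *
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *	dma_release_channel(chan);
 */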

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_prepare_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}
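
/*
 * Board-file registration sketch for this probe path.  The base address,
 * size, IRQ number and channel count are hypothetical; the platform data
 * fields are the ones consumed by dw_probe() above.
 *
 *	static struct dw_dma_platform_data my_dw_pdata = {
 *		.nr_channels		= 4,
 *		.is_private		= false,
 *		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
 *		.chan_priority		= CHAN_PRIORITY_ASCENDING,
 *	};
 *
 *	static struct resource my_dw_resources[] = {
 *		DEFINE_RES_MEM(0xfc000000, 0x1000),
 *		DEFINE_RES_IRQ(25),
 *	};
 *
 *	static struct platform_device my_dw_device = {
 *		.name		= "dw_dmac",
 *		.id		= -1,
 *		.resource	= my_dw_resources,
 *		.num_resources	= ARRAY_SIZE(my_dw_resources),
 *		.dev		= {
 *			.platform_data = &my_dw_pdata,
 *		},
 *	};
 */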

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
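
/*
 * Device tree sketch for the match table above.  Only the compatible string
 * comes from this driver; node name, register window and interrupt specifier
 * are hypothetical and depend on the SoC.
 *
 *	dma@ea800000 {
 *		compatible = "snps,dma-spear1340";
 *		reg = <0xea800000 0x1000>;
 *		interrupts = <0 19 0x4>;
 *	};
 */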

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
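/*
 * Registered at subsys_initcall level rather than module_init so that the
 * DMA controller is available before the client drivers that may depend on
 * it are probed.
 */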
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");