/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(private) ({					\
		struct dw_dma_slave *__slave = (private);		\
		int dms = __slave ? __slave->dst_master : 0;		\
		int sms = __slave ? __slave->src_master : 1;		\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
									\
		(DWC_CTLL_DST_MSIZE(dmsize)				\
		 | DWC_CTLL_SRC_MSIZE(smsize)				\
		 | DWC_CTLL_LLP_D_EN					\
		 | DWC_CTLL_LLP_S_EN					\
		 | DWC_CTLL_DMS(dms)					\
		 | DWC_CTLL_SMS(sms));					\
	})
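
/*
 * For slave transfers the macro above picks the master interfaces and
 * burst sizes out of the struct dw_dma_slave found in chan->private
 * (falling back to masters 0/1 and MSIZE_16 for memcpy).  As a rough,
 * hypothetical sketch of what a platform might hand us -- the name,
 * register addresses and flow-control value below are placeholders,
 * not taken from any real board file:
 *
 *	static struct dw_dma_slave example_dws = {
 *		.dma_dev	= &example_dmac_device.dev,
 *		.tx_reg		= 0xfff01004,
 *		.rx_reg		= 0xfff01000,
 *		.reg_width	= 2,		(32-bit peripheral registers)
 *		.fc		= 0,		(per-databook flow control)
 *		.src_master	= 1,
 *		.dst_master	= 0,
 *		.src_msize	= DW_DMA_MSIZE_16,
 *		.dst_msize	= DW_DMA_MSIZE_16,
 *	};
 *
 * dwc_alloc_chan_resources() and dwc_prep_slave_sg() below read the
 * remaining fields (dma_dev, cfg_hi/cfg_lo, fc, tx_reg/rx_reg).
 */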

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
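
/*
 * Concretely: with 32-bit transfers (src_width == 2) the memcpy path
 * below clamps each descriptor to min(len >> 2, 4095) words, i.e. at
 * most 4095 * 4 = 16380 bytes, and chains further descriptors for the
 * remainder (see the loop in dwc_prep_dma_memcpy()).
 */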

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

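/*
 * In practice that means: descriptors are dma_map_single()d once at
 * allocation time (dwc_alloc_chan_resources()), each lli we have just
 * written is flushed with dma_sync_single_for_device() before the chain
 * is handed to the controller, and dwc_sync_desc_for_cpu() pulls it back
 * before the CPU reads it again.  A condensed sketch of the producer
 * side, mirroring dwc_prep_dma_memcpy() below:
 *
 *	prev->lli.llp = desc->txd.phys;
 *	dma_sync_single_for_device(chan2parent(chan), prev->txd.phys,
 *			sizeof(prev->lli), DMA_TO_DEVICE);
 */
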
static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
        return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
        struct dw_desc *desc, *_desc;
        struct dw_desc *ret = NULL;
        unsigned int i = 0;

        spin_lock_bh(&dwc->lock);
        list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
                i++;
        }
        spin_unlock_bh(&dwc->lock);

        dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

        return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
        struct dw_desc *child;

        list_for_each_entry(child, &desc->tx_list, desc_node)
                dma_sync_single_for_cpu(chan2parent(&dwc->chan),
                                child->txd.phys, sizeof(child->lli),
                                DMA_TO_DEVICE);
        dma_sync_single_for_cpu(chan2parent(&dwc->chan),
                        desc->txd.phys, sizeof(desc->lli),
                        DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
        if (desc) {
                struct dw_desc *child;

                dwc_sync_desc_for_cpu(dwc, desc);

                spin_lock_bh(&dwc->lock);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&dwc->chan),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->tx_list, &dwc->free_list);
                dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &dwc->free_list);
                spin_unlock_bh(&dwc->lock);
        }
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
        dma_cookie_t cookie = dwc->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        dwc->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);

        /* ASSERT: channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&dwc->chan),
                        "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                        channel_readl(dwc, SAR),
                        channel_readl(dwc, DAR),
                        channel_readl(dwc, LLP),
                        channel_readl(dwc, CTL_HI),
                        channel_readl(dwc, CTL_LO));

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        channel_writel(dwc, LLP, first->txd.phys);
        channel_writel(dwc, CTL_LO,
                        DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
        channel_writel(dwc, CTL_HI, 0);
        channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
                bool callback_required)
{
        dma_async_tx_callback callback = NULL;
        void *param = NULL;
        struct dma_async_tx_descriptor *txd = &desc->txd;
        struct dw_desc *child;

        dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

        dwc->completed = txd->cookie;
        if (callback_required) {
                callback = txd->callback;
                param = txd->callback_param;
        }

        dwc_sync_desc_for_cpu(dwc, desc);

        /* async_tx_ack */
        list_for_each_entry(child, &desc->tx_list, desc_node)
                async_tx_ack(&child->txd);
        async_tx_ack(&desc->txd);

        list_splice_init(&desc->tx_list, &dwc->free_list);
        list_move(&desc->desc_node, &dwc->free_list);

        if (!dwc->chan.private) {
                struct device *parent = chan2parent(&dwc->chan);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                                dma_unmap_single(parent, desc->lli.dar,
                                                desc->len, DMA_FROM_DEVICE);
                        else
                                dma_unmap_page(parent, desc->lli.dar,
                                                desc->len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                                dma_unmap_single(parent, desc->lli.sar,
                                                desc->len, DMA_TO_DEVICE);
                        else
                                dma_unmap_page(parent, desc->lli.sar,
                                                desc->len, DMA_TO_DEVICE);
                }
        }

        if (callback_required && callback)
                callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        struct dw_desc *desc, *_desc;
        LIST_HEAD(list);

        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: XFER bit set, but channel not idle!\n");

                /* Try to continue after resetting the channel... */
                channel_clear_bit(dw, CH_EN, dwc->mask);
                while (dma_readl(dw, CH_EN) & dwc->mask)
                        cpu_relax();
        }

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        list_splice_init(&dwc->active_list, &list);
        if (!list_empty(&dwc->queue)) {
                list_move(dwc->queue.next, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        }

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        dma_addr_t llp;
        struct dw_desc *desc, *_desc;
        struct dw_desc *child;
        u32 status_xfer;

        /*
         * Clear block interrupt flag before scanning so that we don't
         * miss any, and read LLP before RAW_XFER to ensure it is
         * valid if we decide to scan the list.
         */
        dma_writel(dw, CLEAR.BLOCK, dwc->mask);
        llp = channel_readl(dwc, LLP);
        status_xfer = dma_readl(dw, RAW.XFER);

        if (status_xfer & dwc->mask) {
                /* Everything we've submitted is done */
                dma_writel(dw, CLEAR.XFER, dwc->mask);
                dwc_complete_all(dw, dwc);
                return;
        }

        if (list_empty(&dwc->active_list))
                return;

        dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                /* check first descriptor's addr */
                if (desc->txd.phys == llp)
                        return;

                /* check first descriptor's llp */
                if (desc->lli.llp == llp)
                        /* This one is currently in progress */
                        return;

                list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (child->lli.llp == llp)
                                /* Currently in progress */
                                return;

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this one must be done.
                 */
                dwc_descriptor_complete(dwc, desc, true);
        }

        dev_err(chan2dev(&dwc->chan),
                "BUG: All descriptors done, but channel not idle!\n");

        /* Try to continue after resetting the channel... */
        channel_clear_bit(dw, CH_EN, dwc->mask);
        while (dma_readl(dw, CH_EN) & dwc->mask)
                cpu_relax();

        if (!list_empty(&dwc->queue)) {
                list_move(dwc->queue.next, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        }
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
        dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                        "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
                        lli->sar, lli->dar, lli->llp,
                        lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        struct dw_desc *bad_desc;
        struct dw_desc *child;

        dwc_scan_descriptors(dw, dwc);

        /*
         * The descriptor currently at the head of the active list is
         * borked. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = dwc_first_active(dwc);
        list_del_init(&bad_desc->desc_node);
        list_move(dwc->queue.next, dwc->active_list.prev);

        /* Clear the error flag and try to restart the controller */
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        if (!list_empty(&dwc->active_list))
                dwc_dostart(dwc, dwc_first_active(dwc));

        /*
         * KERN_CRIT may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                        "Bad descriptor submitted for DMA!\n");
        dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        dwc_dump_lli(dwc, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                dwc_dump_lli(dwc, &child->lli);

        /* Pretend the descriptor completed successfully */
        dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
                u32 status_block, u32 status_err, u32 status_xfer)
{
        if (status_block & dwc->mask) {
                void (*callback)(void *param);
                void *callback_param;

                dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
                                channel_readl(dwc, LLP));
                dma_writel(dw, CLEAR.BLOCK, dwc->mask);

                callback = dwc->cdesc->period_callback;
                callback_param = dwc->cdesc->period_callback_param;
                if (callback) {
                        spin_unlock(&dwc->lock);
                        callback(callback_param);
                        spin_lock(&dwc->lock);
                }
        }

        /*
         * Error and transfer complete are highly unlikely, and will most
         * likely be due to a configuration error by the user.
         */
        if (unlikely(status_err & dwc->mask) ||
                        unlikely(status_xfer & dwc->mask)) {
                int i;

                dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
                                "interrupt, stopping DMA transfer\n",
                                status_xfer ? "xfer" : "error");
                dev_err(chan2dev(&dwc->chan),
                        "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                        channel_readl(dwc, SAR),
                        channel_readl(dwc, DAR),
                        channel_readl(dwc, LLP),
                        channel_readl(dwc, CTL_HI),
                        channel_readl(dwc, CTL_LO));

                channel_clear_bit(dw, CH_EN, dwc->mask);
                while (dma_readl(dw, CH_EN) & dwc->mask)
                        cpu_relax();

                /* make sure DMA does not restart by loading a new list */
                channel_writel(dwc, LLP, 0);
                channel_writel(dwc, CTL_LO, 0);
                channel_writel(dwc, CTL_HI, 0);

                dma_writel(dw, CLEAR.BLOCK, dwc->mask);
                dma_writel(dw, CLEAR.ERROR, dwc->mask);
                dma_writel(dw, CLEAR.XFER, dwc->mask);

                for (i = 0; i < dwc->cdesc->periods; i++)
                        dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
        }
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
        struct dw_dma *dw = (struct dw_dma *)data;
        struct dw_dma_chan *dwc;
        u32 status_block;
        u32 status_xfer;
        u32 status_err;
        int i;

        status_block = dma_readl(dw, RAW.BLOCK);
        status_xfer = dma_readl(dw, RAW.XFER);
        status_err = dma_readl(dw, RAW.ERROR);

        dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
                        status_block, status_err);

        for (i = 0; i < dw->dma.chancnt; i++) {
                dwc = &dw->chan[i];
                spin_lock(&dwc->lock);
                if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
                        dwc_handle_cyclic(dw, dwc, status_block, status_err,
                                        status_xfer);
                else if (status_err & (1 << i))
                        dwc_handle_error(dw, dwc);
                else if ((status_block | status_xfer) & (1 << i))
                        dwc_scan_descriptors(dw, dwc);
                spin_unlock(&dwc->lock);
        }

        /*
         * Re-enable interrupts. Block Complete interrupts are only
         * enabled if the INT_EN bit in the descriptor is set. This
         * will trigger a scan before the whole list is done.
         */
        channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
        struct dw_dma *dw = dev_id;
        u32 status;

        dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
                        dma_readl(dw, STATUS_INT));

        /*
         * Just disable the interrupts. We'll turn them back on in the
         * softirq handler.
         */
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        status = dma_readl(dw, STATUS_INT);
        if (status) {
                dev_err(dw->dma.dev,
                        "BUG: Unexpected interrupts pending: 0x%x\n",
                        status);

                /* Try to recover */
                channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
        }

        tasklet_schedule(&dw->tasklet);

        return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dw_desc *desc = txd_to_dw_desc(tx);
        struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&dwc->lock);
        cookie = dwc_assign_cookie(dwc, desc);

        /*
         * REVISIT: We should attempt to chain as many descriptors as
         * possible, perhaps even appending to those already submitted
         * for DMA. But this is hard to do in a race-free manner.
         */
        if (list_empty(&dwc->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                desc->txd.cookie);
                list_add_tail(&desc->desc_node, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                desc->txd.cookie);

                list_add_tail(&desc->desc_node, &dwc->queue);
        }

        spin_unlock_bh(&dwc->lock);

        return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_desc *desc;
        struct dw_desc *first;
        struct dw_desc *prev;
        size_t xfer_count;
        size_t offset;
        unsigned int src_width;
        unsigned int dst_width;
        u32 ctllo;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
                        dest, src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!((src | dest | len) & 7))
                src_width = dst_width = 3;
        else if (!((src | dest | len) & 3))
                src_width = dst_width = 2;
        else if (!((src | dest | len) & 1))
                src_width = dst_width = 1;
        else
                src_width = dst_width = 0;

        ctllo = DWC_DEFAULT_CTLLO(chan->private)
                        | DWC_CTLL_DST_WIDTH(dst_width)
                        | DWC_CTLL_SRC_WIDTH(src_width)
                        | DWC_CTLL_DST_INC
                        | DWC_CTLL_SRC_INC
                        | DWC_CTLL_FC_M2M;
        prev = first = NULL;

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                DWC_MAX_COUNT);

                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto err_desc_get;

                desc->lli.sar = src + offset;
                desc->lli.dar = dest + offset;
                desc->lli.ctllo = ctllo;
                desc->lli.ctlhi = xfer_count;

                if (!first) {
                        first = desc;
                } else {
                        prev->lli.llp = desc->txd.phys;
                        dma_sync_single_for_device(chan2parent(chan),
                                        prev->txd.phys, sizeof(prev->lli),
                                        DMA_TO_DEVICE);
                        list_add_tail(&desc->desc_node,
                                        &first->tx_list);
                }
                prev = desc;
        }

        if (flags & DMA_PREP_INTERRUPT)
                /* Trigger interrupt after last block */
                prev->lli.ctllo |= DWC_CTLL_INT_EN;

        prev->lli.llp = 0;
        dma_sync_single_for_device(chan2parent(chan),
                        prev->txd.phys, sizeof(prev->lli),
                        DMA_TO_DEVICE);

        first->txd.flags = flags;
        first->len = len;

        return &first->txd;

err_desc_get:
        dwc_desc_put(dwc, first);
        return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma_slave *dws = chan->private;
        struct dw_desc *prev;
        struct dw_desc *first;
        u32 ctllo;
        dma_addr_t reg;
        unsigned int reg_width;
        unsigned int mem_width;
        unsigned int i;
        struct scatterlist *sg;
        size_t total_len = 0;

        dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

        if (unlikely(!dws || !sg_len))
                return NULL;

        reg_width = dws->reg_width;
        prev = first = NULL;

        switch (direction) {
        case DMA_TO_DEVICE:
                ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_DST_WIDTH(reg_width)
                                | DWC_CTLL_DST_FIX
                                | DWC_CTLL_SRC_INC
                                | DWC_CTLL_FC(dws->fc));
                reg = dws->tx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc *desc;
                        u32 len, dlen, mem;

                        mem = sg_phys(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

slave_sg_todev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
                                dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }

                        desc->lli.sar = mem;
                        desc->lli.dar = reg;
                        desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
                        if ((len >> mem_width) > DWC_MAX_COUNT) {
                                dlen = DWC_MAX_COUNT << mem_width;
                                mem += dlen;
                                len -= dlen;
                        } else {
                                dlen = len;
                                len = 0;
                        }

                        desc->lli.ctlhi = dlen >> mem_width;

                        if (!first) {
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
                                dma_sync_single_for_device(chan2parent(chan),
                                                prev->txd.phys,
                                                sizeof(prev->lli),
                                                DMA_TO_DEVICE);
                                list_add_tail(&desc->desc_node,
                                                &first->tx_list);
                        }
                        prev = desc;
                        total_len += dlen;

                        if (len)
                                goto slave_sg_todev_fill_desc;
                }
                break;
        case DMA_FROM_DEVICE:
                ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_SRC_WIDTH(reg_width)
                                | DWC_CTLL_DST_INC
                                | DWC_CTLL_SRC_FIX
                                | DWC_CTLL_FC(dws->fc));

                reg = dws->rx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc *desc;
                        u32 len, dlen, mem;

                        mem = sg_phys(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

slave_sg_fromdev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
                                dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }

                        desc->lli.sar = reg;
                        desc->lli.dar = mem;
                        desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
                        if ((len >> reg_width) > DWC_MAX_COUNT) {
                                dlen = DWC_MAX_COUNT << reg_width;
                                mem += dlen;
                                len -= dlen;
                        } else {
                                dlen = len;
                                len = 0;
                        }
                        desc->lli.ctlhi = dlen >> reg_width;

                        if (!first) {
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
                                dma_sync_single_for_device(chan2parent(chan),
                                                prev->txd.phys,
                                                sizeof(prev->lli),
                                                DMA_TO_DEVICE);
                                list_add_tail(&desc->desc_node,
                                                &first->tx_list);
                        }
                        prev = desc;
                        total_len += dlen;

                        if (len)
                                goto slave_sg_fromdev_fill_desc;
                }
                break;
        default:
                return NULL;
        }

        if (flags & DMA_PREP_INTERRUPT)
                /* Trigger interrupt after last block */
                prev->lli.ctllo |= DWC_CTLL_INT_EN;

        prev->lli.llp = 0;
        dma_sync_single_for_device(chan2parent(chan),
                        prev->txd.phys, sizeof(prev->lli),
                        DMA_TO_DEVICE);

        first->len = total_len;

        return &first->txd;

err_desc_get:
        dwc_desc_put(dwc, first);
        return NULL;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(chan->device);
        struct dw_desc *desc, *_desc;
        LIST_HEAD(list);

        /* Only supports DMA_TERMINATE_ALL */
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        /*
         * This is only called when something went wrong elsewhere, so
         * we don't really care about the data. Just disable the
         * channel. We still have to poll the channel enable bit due
         * to AHB/HSB limitations.
         */
        spin_lock_bh(&dwc->lock);

        channel_clear_bit(dw, CH_EN, dwc->mask);

        while (dma_readl(dw, CH_EN) & dwc->mask)
                cpu_relax();

        /* active_list entries will end up before queued entries */
        list_splice_init(&dwc->queue, &list);
        list_splice_init(&dwc->active_list, &list);

        spin_unlock_bh(&dwc->lock);

        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                dwc_descriptor_complete(dwc, desc, false);

        return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
              dma_cookie_t cookie,
              struct dma_tx_state *txstate)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        int ret;

        last_complete = dwc->completed;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
                spin_lock_bh(&dwc->lock);
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
                spin_unlock_bh(&dwc->lock);

                last_complete = dwc->completed;
                last_used = chan->cookie;

                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }

        if (ret != DMA_SUCCESS)
                dma_set_tx_state(txstate, last_complete, last_used,
                                dwc_first_active(dwc)->len);
        else
                dma_set_tx_state(txstate, last_complete, last_used, 0);

        return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

        spin_lock_bh(&dwc->lock);
        if (!list_empty(&dwc->queue))
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
        spin_unlock_bh(&dwc->lock);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(chan->device);
        struct dw_desc *desc;
        struct dw_dma_slave *dws;
        int i;
        u32 cfghi;
        u32 cfglo;

        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

        /* ASSERT: channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }

        dwc->completed = chan->cookie = 1;

        cfghi = DWC_CFGH_FIFO_MODE;
        cfglo = 0;

        dws = chan->private;
        if (dws) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
                BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

                cfghi = dws->cfg_hi;
                cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
        }

        cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);

        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);

        /*
         * NOTE: some controllers may have additional features that we
         * need to initialize here, like "scatter-gather" (which
         * doesn't mean what you think it means), and status writeback.
         */

        spin_lock_bh(&dwc->lock);
        i = dwc->descs_allocated;
        while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
                spin_unlock_bh(&dwc->lock);

                desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
                if (!desc) {
                        dev_info(chan2dev(chan),
                                "only allocated %d descriptors\n", i);
                        spin_lock_bh(&dwc->lock);
                        break;
                }

                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = dwc_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
                                sizeof(desc->lli), DMA_TO_DEVICE);
                dwc_desc_put(dwc, desc);

                spin_lock_bh(&dwc->lock);
                i = ++dwc->descs_allocated;
        }

        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
        channel_set_bit(dw, MASK.BLOCK, dwc->mask);
        channel_set_bit(dw, MASK.ERROR, dwc->mask);

        spin_unlock_bh(&dwc->lock);

        dev_dbg(chan2dev(chan),
                "alloc_chan_resources allocated %d descriptors\n", i);

        return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(chan->device);
        struct dw_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
                        dwc->descs_allocated);

        /* ASSERT: channel is idle */
        BUG_ON(!list_empty(&dwc->active_list));
        BUG_ON(!list_empty(&dwc->queue));
        BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

        spin_lock_bh(&dwc->lock);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;

        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
        channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
        channel_clear_bit(dw, MASK.ERROR, dwc->mask);

        spin_unlock_bh(&dwc->lock);

        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
                dma_unmap_single(chan2parent(chan), desc->txd.phys,
                                sizeof(desc->lli), DMA_TO_DEVICE);
                kfree(desc);
        }

        dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);

        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
                dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
                return -ENODEV;
        }

        spin_lock(&dwc->lock);

        /* assert channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&dwc->chan),
                        "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                        channel_readl(dwc, SAR),
                        channel_readl(dwc, DAR),
                        channel_readl(dwc, LLP),
                        channel_readl(dwc, CTL_HI),
                        channel_readl(dwc, CTL_LO));
                spin_unlock(&dwc->lock);
                return -EBUSY;
        }

        dma_writel(dw, CLEAR.BLOCK, dwc->mask);
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);

        /* setup DMAC channel registers */
        channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
        channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
        channel_writel(dwc, CTL_HI, 0);

        channel_set_bit(dw, CH_EN, dwc->mask);

        spin_unlock(&dwc->lock);

        return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);

        spin_lock(&dwc->lock);

        channel_clear_bit(dw, CH_EN, dwc->mask);
        while (dma_readl(dw, CH_EN) & dwc->mask)
                cpu_relax();

        spin_unlock(&dwc->lock);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
                enum dma_data_direction direction)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_cyclic_desc *cdesc;
        struct dw_cyclic_desc *retval = NULL;
        struct dw_desc *desc;
        struct dw_desc *last = NULL;
        struct dw_dma_slave *dws = chan->private;
        unsigned long was_cyclic;
        unsigned int reg_width;
        unsigned int periods;
        unsigned int i;

        spin_lock_bh(&dwc->lock);
        if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
                spin_unlock_bh(&dwc->lock);
                dev_dbg(chan2dev(&dwc->chan),
                                "queue and/or active list are not empty\n");
                return ERR_PTR(-EBUSY);
        }

        was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
        spin_unlock_bh(&dwc->lock);
        if (was_cyclic) {
                dev_dbg(chan2dev(&dwc->chan),
                                "channel already prepared for cyclic DMA\n");
                return ERR_PTR(-EBUSY);
        }

        retval = ERR_PTR(-EINVAL);
        reg_width = dws->reg_width;
        periods = buf_len / period_len;

        /* Check for too big/unaligned periods and unaligned DMA buffer. */
        if (period_len > (DWC_MAX_COUNT << reg_width))
                goto out_err;
        if (unlikely(period_len & ((1 << reg_width) - 1)))
                goto out_err;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto out_err;
        if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
                goto out_err;

        retval = ERR_PTR(-ENOMEM);

        if (periods > NR_DESCS_PER_CHANNEL)
                goto out_err;

        cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
        if (!cdesc)
                goto out_err;

        cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
        if (!cdesc->desc)
                goto out_err_alloc;

        for (i = 0; i < periods; i++) {
                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto out_err_desc_get;

                switch (direction) {
                case DMA_TO_DEVICE:
                        desc->lli.dar = dws->tx_reg;
                        desc->lli.sar = buf_addr + (period_len * i);
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_FIX
                                        | DWC_CTLL_SRC_INC
                                        | DWC_CTLL_FC(dws->fc)
                                        | DWC_CTLL_INT_EN);
                        break;
                case DMA_FROM_DEVICE:
                        desc->lli.dar = buf_addr + (period_len * i);
                        desc->lli.sar = dws->rx_reg;
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_DST_INC
                                        | DWC_CTLL_SRC_FIX
                                        | DWC_CTLL_FC(dws->fc)
                                        | DWC_CTLL_INT_EN);
                        break;
                default:
                        break;
                }

                desc->lli.ctlhi = (period_len >> reg_width);
                cdesc->desc[i] = desc;

                if (last) {
                        last->lli.llp = desc->txd.phys;
                        dma_sync_single_for_device(chan2parent(chan),
                                        last->txd.phys, sizeof(last->lli),
                                        DMA_TO_DEVICE);
                }

                last = desc;
        }

        /* let's make a cyclic list */
        last->lli.llp = cdesc->desc[0]->txd.phys;
        dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
                        sizeof(last->lli), DMA_TO_DEVICE);

        dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
                        "period %zu periods %d\n", buf_addr, buf_len,
                        period_len, periods);

        cdesc->periods = periods;
        dwc->cdesc = cdesc;

        return cdesc;

out_err_desc_get:
        while (i--)
                dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
        kfree(cdesc);
out_err:
        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
        return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        struct dw_cyclic_desc *cdesc = dwc->cdesc;
        int i;

        dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

        if (!cdesc)
                return;

        spin_lock_bh(&dwc->lock);

        channel_clear_bit(dw, CH_EN, dwc->mask);
        while (dma_readl(dw, CH_EN) & dwc->mask)
                cpu_relax();

        dma_writel(dw, CLEAR.BLOCK, dwc->mask);
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);

        spin_unlock_bh(&dwc->lock);

        for (i = 0; i < cdesc->periods; i++)
                dwc_desc_put(dwc, cdesc->desc[i]);

        kfree(cdesc->desc);
        kfree(cdesc);

        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

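/*
 * A cyclic client (say, an audio or serial driver) is expected to use
 * the four exported helpers above roughly as follows.  This is a hedged
 * sketch rather than a copy of any in-tree user; my_period_done and
 * my_dev are hypothetical names on the client side:
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len,
 *			period_len, DMA_TO_DEVICE);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_dev;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *
 * dwc_handle_cyclic() invokes period_callback once per completed period.
 */
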
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
        dma_writel(dw, CFG, 0);

        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();
}

static int __init dw_probe(struct platform_device *pdev)
{
        struct dw_dma_platform_data *pdata;
        struct resource *io;
        struct dw_dma *dw;
        size_t size;
        int irq;
        int err;
        int i;

        pdata = pdev->dev.platform_data;
        if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
                return -EINVAL;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        size = sizeof(struct dw_dma);
        size += pdata->nr_channels * sizeof(struct dw_dma_chan);
        dw = kzalloc(size, GFP_KERNEL);
        if (!dw)
                return -ENOMEM;

        if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
                err = -EBUSY;
                goto err_kfree;
        }

        dw->regs = ioremap(io->start, DW_REGLEN);
        if (!dw->regs) {
                err = -ENOMEM;
                goto err_release_r;
        }

        dw->clk = clk_get(&pdev->dev, "hclk");
        if (IS_ERR(dw->clk)) {
                err = PTR_ERR(dw->clk);
                goto err_clk;
        }
        clk_enable(dw->clk);

        /* force dma off, just in case */
        dw_dma_off(dw);

        err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
        if (err)
                goto err_irq;

        platform_set_drvdata(pdev, dw);

        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

        dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
                struct dw_dma_chan *dwc = &dw->chan[i];

                dwc->chan.device = &dw->dma;
                dwc->chan.cookie = dwc->completed = 1;
                dwc->chan.chan_id = i;
                if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
                        list_add_tail(&dwc->chan.device_node,
                                        &dw->dma.channels);
                else
                        list_add(&dwc->chan.device_node, &dw->dma.channels);

                /* 7 is highest priority & 0 is lowest. */
                if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
                        dwc->priority = 7 - i;
                else
                        dwc->priority = i;

                dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
                spin_lock_init(&dwc->lock);
                dwc->mask = 1 << i;

                INIT_LIST_HEAD(&dwc->active_list);
                INIT_LIST_HEAD(&dwc->queue);
                INIT_LIST_HEAD(&dwc->free_list);

                channel_clear_bit(dw, CH_EN, dwc->mask);
        }

        /* Clear/disable all interrupts on all channels. */
        dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
        dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
        dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
        dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
        dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
        if (pdata->is_private)
                dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
        dw->dma.dev = &pdev->dev;
        dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dwc_free_chan_resources;

        dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

        dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
        dw->dma.device_control = dwc_control;

        dw->dma.device_tx_status = dwc_tx_status;
        dw->dma.device_issue_pending = dwc_issue_pending;

        dma_writel(dw, CFG, DW_CFG_DMA_EN);

        printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
                        dev_name(&pdev->dev), dw->dma.chancnt);

        dma_async_device_register(&dw->dma);

        return 0;

err_irq:
        clk_disable(dw->clk);
        clk_put(dw->clk);
err_clk:
        iounmap(dw->regs);
        dw->regs = NULL;
err_release_r:
        release_resource(io);
err_kfree:
        kfree(dw);
        return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
        struct dw_dma *dw = platform_get_drvdata(pdev);
        struct dw_dma_chan *dwc, *_dwc;
        struct resource *io;

        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);

        free_irq(platform_get_irq(pdev, 0), dw);
        tasklet_kill(&dw->tasklet);

        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
                        chan.device_node) {
                list_del(&dwc->chan.device_node);
                channel_clear_bit(dw, CH_EN, dwc->mask);
        }

        clk_disable(dw->clk);
        clk_put(dw->clk);

        iounmap(dw->regs);
        dw->regs = NULL;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(io->start, DW_REGLEN);

        kfree(dw);

        return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
        struct dw_dma *dw = platform_get_drvdata(pdev);

        dw_dma_off(platform_get_drvdata(pdev));
        clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma *dw = platform_get_drvdata(pdev);

        dw_dma_off(platform_get_drvdata(pdev));
        clk_disable(dw->clk);
        return 0;
}

static int dw_resume_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma *dw = platform_get_drvdata(pdev);

        clk_enable(dw->clk);
        dma_writel(dw, CFG, DW_CFG_DMA_EN);
        return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
        .suspend_noirq = dw_suspend_noirq,
        .resume_noirq = dw_resume_noirq,
};

static struct platform_driver dw_driver = {
        .remove         = __exit_p(dw_remove),
        .shutdown       = dw_shutdown,
        .driver = {
                .name   = "dw_dmac",
                .pm     = &dw_dev_pm_ops,
        },
};

static int __init dw_init(void)
{
        return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
        platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");