/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
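
/*
 * Illustrative sketch (not part of this driver): how a client might drive
 * a memcpy through this engine via the generic dmaengine API. The names
 * below are the generic dmaengine ones; the dst/src/len values are
 * hypothetical and error handling is elided.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */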

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static const char msg_ld_oom[] = "No free memory for link descriptor\n";

static void dma_init(struct fsldma_chan *chan)
{
        /* Reset the channel */
        DMA_OUT(chan, &chan->regs->mr, 0, 32);

        switch (chan->feature & FSL_DMA_IP_MASK) {
        case FSL_DMA_IP_85XX:
                /*
                 * Set the channel to the following modes:
                 *  EIE - Error interrupt enable
                 *  EOSIE - End of segments interrupt enable (basic mode)
                 *  EOLNIE - End of links interrupt enable
                 */
                DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
                                | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
                break;
        case FSL_DMA_IP_83XX:
                /*
                 * Set the channel to the following modes:
                 *  EOTIE - End-of-transfer interrupt enable
                 *  PRC_RM - PCI read multiple
                 */
                DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
                                | FSL_DMA_MR_PRC_RM, 32);
                break;
        }
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
        DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
        return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
                         struct fsl_dma_ld_hw *hw, u32 count)
{
        hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
                         struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
        u64 snoop_bits;

        snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
                ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
        hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
                         struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
        u64 snoop_bits;

        snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
                ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
        hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
                          struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
        u64 snoop_bits;

        snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
                ? FSL_DMA_SNEN : 0;
        hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
        DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
        return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
        return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
        return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * The channel is considered idle when the channel busy (CB) bit is
 * clear, or when the channel has halted (CH).
 */
static int dma_is_idle(struct fsldma_chan *chan)
{
        u32 sr = get_sr(chan);
        return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
        u32 mode;

        mode = DMA_IN(chan, &chan->regs->mr, 32);

        if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
                if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
                        DMA_OUT(chan, &chan->regs->bcr, 0, 32);
                        mode |= FSL_DMA_MR_EMP_EN;
                } else {
                        mode &= ~FSL_DMA_MR_EMP_EN;
                }
        }

        if (chan->feature & FSL_DMA_CHAN_START_EXT)
                mode |= FSL_DMA_MR_EMS_EN;
        else
                mode |= FSL_DMA_MR_CS;

        DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
        u32 mode;
        int i;

        mode = DMA_IN(chan, &chan->regs->mr, 32);
        mode |= FSL_DMA_MR_CA;
        DMA_OUT(chan, &chan->regs->mr, mode, 32);

        mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
        DMA_OUT(chan, &chan->regs->mr, mode, 32);

        for (i = 0; i < 100; i++) {
                if (dma_is_idle(chan))
                        return;

                udelay(10);
        }

        if (!dma_is_idle(chan))
                dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
                       struct fsl_desc_sw *desc)
{
        u64 snoop_bits;

        snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
                ? FSL_DMA_SNEN : 0;

        desc->hw.next_ln_addr = CPU_TO_DMA(chan,
                DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
                        | snoop_bits, 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the source address hold (loop) transfer size. When the DMA
 * transfers data from the source address (SA) and the loop size is 4,
 * the DMA reads data from SA, SA + 1, SA + 2, SA + 3, then loops back
 * to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
        u32 mode;

        mode = DMA_IN(chan, &chan->regs->mr, 32);

        switch (size) {
        case 0:
                mode &= ~FSL_DMA_MR_SAHE;
                break;
        case 1:
        case 2:
        case 4:
        case 8:
                mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
                break;
        }

        DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * transfers data to the destination address (TA) and the loop size is 4,
 * the DMA writes data to TA, TA + 1, TA + 2, TA + 3, then loops back
 * to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
        u32 mode;

        mode = DMA_IN(chan, &chan->regs->mr, 32);

        switch (size) {
        case 0:
                mode &= ~FSL_DMA_MR_DAHE;
                break;
        case 1:
        case 2:
        case 4:
        case 8:
                mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
                break;
        }

        DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
        u32 mode;

        BUG_ON(size > 1024);

        mode = DMA_IN(chan, &chan->regs->mr, 32);
        mode |= (__ilog2(size) << 24) & 0x0f000000;

        DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
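
/*
 * Worked example of the encoding above (hypothetical value): for a
 * request count of 64 bytes, __ilog2(64) == 6, so the value OR'd into
 * the mode register is (6 << 24) & 0x0f000000 == 0x06000000.
 */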

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
        if (enable)
                chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
        else
                chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel is started by an external
 * DMA start pin, so dma_start() does not start the transfer immediately.
 * The DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
        if (enable)
                chan->feature |= FSL_DMA_CHAN_START_EXT;
        else
                chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan,
                            struct fsl_desc_sw *desc)
{
        struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

        if (list_empty(&chan->ld_pending))
                goto out_splice;

        /*
         * Add the hardware descriptor to the chain of hardware descriptors
         * that already exists in memory.
         *
         * This will un-set the EOL bit of the existing transaction, and the
         * last link in this transaction will become the EOL descriptor.
         */
        set_desc_next(chan, &tail->hw, desc->async_tx.phys);

        /*
         * Add the software descriptor and all children to the list
         * of pending transactions
         */
out_splice:
        list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
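
/*
 * Sketch of the resulting hardware chain for two queued transactions A
 * (descriptors A0..A1) and B (descriptors B0..B1): before the append the
 * chain is A0 -> A1 [EOL]; afterwards it is A0 -> A1 -> B0 -> B1 [EOL],
 * because set_desc_next() overwrites A1's next pointer (clearing its EOL
 * bit) and B's final link carries the EOL marker set by set_ld_eol().
 */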

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct fsldma_chan *chan = to_fsl_chan(tx->chan);
        struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
        struct fsl_desc_sw *child;
        unsigned long flags;
        dma_cookie_t cookie;

        spin_lock_irqsave(&chan->desc_lock, flags);

        /*
         * assign cookies to all of the software descriptors
         * that make up this transaction
         */
        cookie = chan->common.cookie;
        list_for_each_entry(child, &desc->tx_list, node) {
                cookie++;
                if (cookie < 0)
                        cookie = 1;

                child->async_tx.cookie = cookie;
        }

        chan->common.cookie = cookie;

        /* put this transaction onto the tail of the pending queue */
        append_ld_queue(chan, desc);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
                                                struct fsldma_chan *chan)
{
        struct fsl_desc_sw *desc;
        dma_addr_t pdesc;

        desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc) {
                dev_dbg(chan->dev, "out of memory for link desc\n");
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = fsl_dma_tx_submit;
        desc->async_tx.phys = pdesc;

        return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct fsldma_chan *chan = to_fsl_chan(dchan);

        /* Has this channel already been allocated? */
        if (chan->desc_pool)
                return 1;

        /*
         * The descriptor must be aligned to 32 bytes to meet the
         * FSL DMA specification requirement.
         */
        chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
                                          chan->dev,
                                          sizeof(struct fsl_desc_sw),
                                          __alignof__(struct fsl_desc_sw), 0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate channel %d "
                                   "descriptor pool\n", chan->id);
                return -ENOMEM;
        }

        /* there is at least one descriptor free to be allocated */
        return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
                                  struct list_head *list)
{
        struct fsl_desc_sw *desc, *_desc;

        list_for_each_entry_safe(desc, _desc, list, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
                                          struct list_head *list)
{
        struct fsl_desc_sw *desc, *_desc;

        list_for_each_entry_safe_reverse(desc, _desc, list, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
        struct fsldma_chan *chan = to_fsl_chan(dchan);
        unsigned long flags;

        dev_dbg(chan->dev, "Free all channel resources.\n");
        spin_lock_irqsave(&chan->desc_lock, flags);
        fsldma_free_desc_list(chan, &chan->ld_pending);
        fsldma_free_desc_list(chan, &chan->ld_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
        struct fsldma_chan *chan;
        struct fsl_desc_sw *new;

        if (!dchan)
                return NULL;

        chan = to_fsl_chan(dchan);

        new = fsl_dma_alloc_descriptor(chan);
        if (!new) {
                dev_err(chan->dev, msg_ld_oom);
                return NULL;
        }

        new->async_tx.cookie = -EBUSY;
        new->async_tx.flags = flags;

        /* Insert the link descriptor to the LD ring */
        list_add_tail(&new->node, &new->tx_list);

        /* Set End-of-link to the last link descriptor of new list */
        set_ld_eol(chan, new);

        return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
        struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct fsldma_chan *chan;
        struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
        size_t copy;

        if (!dchan)
                return NULL;

        if (!len)
                return NULL;

        chan = to_fsl_chan(dchan);

        do {
                /* Allocate the link descriptor from DMA pool */
                new = fsl_dma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, msg_ld_oom);
                        goto fail;
                }
#ifdef FSL_DMA_LD_DEBUG
                dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

                copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

                set_desc_cnt(chan, &new->hw, copy);
                set_desc_src(chan, &new->hw, dma_src);
                set_desc_dst(chan, &new->hw, dma_dst);

                if (!first)
                        first = new;
                else
                        set_desc_next(chan, &prev->hw, new->async_tx.phys);

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= copy;
                dma_src += copy;
                dma_dst += copy;

                /* Insert the link descriptor to the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        new->async_tx.flags = flags; /* client is in control of this ack */
        new->async_tx.cookie = -EBUSY;

        /* Set End-of-link to the last link descriptor of new list */
        set_ld_eol(chan, new);

        return &first->async_tx;

fail:
        if (!first)
                return NULL;

        fsldma_free_desc_list_reverse(chan, &first->tx_list);
        return NULL;
}
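
/*
 * Example of the splitting above (hypothetical length): a request with
 * len == 2 * FSL_DMA_BCR_MAX_CNT + 100 is built as three chained link
 * descriptors of FSL_DMA_BCR_MAX_CNT, FSL_DMA_BCR_MAX_CNT and 100 bytes,
 * with only the final descriptor carrying the EOL bit.
 */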

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
        struct scatterlist *dst_sg, unsigned int dst_nents,
        struct scatterlist *src_sg, unsigned int src_nents,
        unsigned long flags)
{
        struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
        struct fsldma_chan *chan = to_fsl_chan(dchan);
        size_t dst_avail, src_avail;
        dma_addr_t dst, src;
        size_t len;

        /* basic sanity checks */
        if (dst_nents == 0 || src_nents == 0)
                return NULL;

        if (dst_sg == NULL || src_sg == NULL)
                return NULL;

        /*
         * TODO: should we check that both scatterlists have the same
         * TODO: number of bytes in total? Is that really an error?
         */

        /* get prepared for the loop */
        dst_avail = sg_dma_len(dst_sg);
        src_avail = sg_dma_len(src_sg);

        /* run until we are out of scatterlist entries */
        while (true) {

                /* create the largest transaction possible */
                len = min_t(size_t, src_avail, dst_avail);
                len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
                if (len == 0)
                        goto fetch;

                dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
                src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

                /* allocate and populate the descriptor */
                new = fsl_dma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, msg_ld_oom);
                        goto fail;
                }
#ifdef FSL_DMA_LD_DEBUG
                dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

                set_desc_cnt(chan, &new->hw, len);
                set_desc_src(chan, &new->hw, src);
                set_desc_dst(chan, &new->hw, dst);

                if (!first)
                        first = new;
                else
                        set_desc_next(chan, &prev->hw, new->async_tx.phys);

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);
                prev = new;

                /* Insert the link descriptor to the LD ring */
                list_add_tail(&new->node, &first->tx_list);

                /* update metadata */
                dst_avail -= len;
                src_avail -= len;

fetch:
                /* fetch the next dst scatterlist entry */
                if (dst_avail == 0) {

                        /* no more entries: we're done */
                        if (dst_nents == 0)
                                break;

                        /* fetch the next entry: if there are no more: done */
                        dst_sg = sg_next(dst_sg);
                        if (dst_sg == NULL)
                                break;

                        dst_nents--;
                        dst_avail = sg_dma_len(dst_sg);
                }

                /* fetch the next src scatterlist entry */
                if (src_avail == 0) {

                        /* no more entries: we're done */
                        if (src_nents == 0)
                                break;

                        /* fetch the next entry: if there are no more: done */
                        src_sg = sg_next(src_sg);
                        if (src_sg == NULL)
                                break;

                        src_nents--;
                        src_avail = sg_dma_len(src_sg);
                }
        }

        new->async_tx.flags = flags; /* client is in control of this ack */
        new->async_tx.cookie = -EBUSY;

        /* Set End-of-link to the last link descriptor of new list */
        set_ld_eol(chan, new);

        return &first->async_tx;

fail:
        if (!first)
                return NULL;

        fsldma_free_desc_list_reverse(chan, &first->tx_list);
        return NULL;
}
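
/*
 * Example of the min_t() splitting above (hypothetical lists): copying a
 * single 4096-byte dst entry from four 1024-byte src entries yields four
 * 1024-byte link descriptors, each offset 1024 bytes further into the dst
 * entry; the roles reverse symmetrically for a 4096-byte src entry.
 */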

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
        struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_data_direction direction, unsigned long flags)
{
        struct fsldma_chan *chan;
        struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
        struct fsl_dma_slave *slave;
        size_t copy;

        int i;
        struct scatterlist *sg;
        size_t sg_used;
        size_t hw_used;
        struct fsl_dma_hw_addr *hw;
        dma_addr_t dma_dst, dma_src;

        if (!dchan)
                return NULL;

        if (!dchan->private)
                return NULL;

        chan = to_fsl_chan(dchan);
        slave = dchan->private;

        if (list_empty(&slave->addresses))
                return NULL;

        hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
        hw_used = 0;

        /*
         * Build the hardware transaction to copy from the scatterlist to
         * the hardware, or from the hardware to the scatterlist
         *
         * If you are copying from the hardware to the scatterlist and it
         * takes two hardware entries to fill an entire page, then both
         * hardware entries will be coalesced into the same page
         *
         * If you are copying from the scatterlist to the hardware and a
         * single page can fill two hardware entries, then the data will
         * be read out of the page into the first hardware entry, and so on
         */
        for_each_sg(sgl, sg, sg_len, i) {
                sg_used = 0;

                /* Loop until the entire scatterlist entry is used */
                while (sg_used < sg_dma_len(sg)) {

                        /*
                         * If we've used up the current hardware address/length
                         * pair, we need to load a new one
                         *
                         * This is done in a while loop so that descriptors with
                         * length == 0 will be skipped
                         */
                        while (hw_used >= hw->length) {

                                /*
                                 * If the current hardware entry is the last
                                 * entry in the list, we're finished
                                 */
                                if (list_is_last(&hw->entry, &slave->addresses))
                                        goto finished;

                                /* Get the next hardware address/length pair */
                                hw = list_entry(hw->entry.next,
                                                struct fsl_dma_hw_addr, entry);
                                hw_used = 0;
                        }

                        /* Allocate the link descriptor from DMA pool */
                        new = fsl_dma_alloc_descriptor(chan);
                        if (!new) {
                                dev_err(chan->dev, msg_ld_oom);
                                goto fail;
                        }
#ifdef FSL_DMA_LD_DEBUG
                        dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

                        /*
                         * Calculate the maximum number of bytes to transfer,
                         * making sure it is less than the DMA controller limit
                         */
                        copy = min_t(size_t, sg_dma_len(sg) - sg_used,
                                     hw->length - hw_used);
                        copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

                        /*
                         * DMA_FROM_DEVICE
                         * from the hardware to the scatterlist
                         *
                         * DMA_TO_DEVICE
                         * from the scatterlist to the hardware
                         */
                        if (direction == DMA_FROM_DEVICE) {
                                dma_src = hw->address + hw_used;
                                dma_dst = sg_dma_address(sg) + sg_used;
                        } else {
                                dma_src = sg_dma_address(sg) + sg_used;
                                dma_dst = hw->address + hw_used;
                        }

                        /* Fill in the descriptor */
                        set_desc_cnt(chan, &new->hw, copy);
                        set_desc_src(chan, &new->hw, dma_src);
                        set_desc_dst(chan, &new->hw, dma_dst);

                        /*
                         * If this is not the first descriptor, chain the
                         * current descriptor after the previous descriptor
                         */
                        if (!first) {
                                first = new;
                        } else {
                                set_desc_next(chan, &prev->hw,
                                              new->async_tx.phys);
                        }

                        new->async_tx.cookie = 0;
                        async_tx_ack(&new->async_tx);

                        prev = new;
                        sg_used += copy;
                        hw_used += copy;

                        /* Insert the link descriptor into the LD ring */
                        list_add_tail(&new->node, &first->tx_list);
                }
        }

finished:

        /* All of the hardware address/length pairs had length == 0 */
        if (!first || !new)
                return NULL;

        new->async_tx.flags = flags;
        new->async_tx.cookie = -EBUSY;

        /* Set End-of-link to the last link descriptor of new list */
        set_ld_eol(chan, new);

        /* Enable extra controller features */
        if (chan->set_src_loop_size)
                chan->set_src_loop_size(chan, slave->src_loop_size);

        if (chan->set_dst_loop_size)
                chan->set_dst_loop_size(chan, slave->dst_loop_size);

        if (chan->toggle_ext_start)
                chan->toggle_ext_start(chan, slave->external_start);

        if (chan->toggle_ext_pause)
                chan->toggle_ext_pause(chan, slave->external_pause);

        if (chan->set_request_count)
                chan->set_request_count(chan, slave->request_count);

        return &first->async_tx;

fail:
        /*
         * If first was not set, then we failed to allocate the very first
         * descriptor, and we're done
         */
        if (!first)
                return NULL;

        /*
         * First is set, so all of the descriptors we allocated have been added
         * to first->tx_list, INCLUDING "first" itself. Therefore we
         * must traverse the list backwards freeing each descriptor in turn
         *
         * We're re-using variables for the loop, oh well
         */
        fsldma_free_desc_list_reverse(chan, &first->tx_list);
        return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
                                  enum dma_ctrl_cmd cmd, unsigned long arg)
{
        struct fsldma_chan *chan;
        unsigned long flags;

        /* Only supports DMA_TERMINATE_ALL */
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        if (!dchan)
                return -EINVAL;

        chan = to_fsl_chan(dchan);

        /* Halt the DMA engine */
        dma_halt(chan);

        spin_lock_irqsave(&chan->desc_lock, flags);

        /* Remove and free all of the descriptors in the LD queue */
        fsldma_free_desc_list(chan, &chan->ld_pending);
        fsldma_free_desc_list(chan, &chan->ld_running);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return 0;
}
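
/*
 * Illustrative sketch (not part of this driver): a client aborts all
 * outstanding transfers on a channel through the generic dmaengine
 * control hook, which lands in fsl_dma_device_control() above:
 *
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */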

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
        struct fsl_desc_sw *desc;
        unsigned long flags;
        dma_cookie_t cookie;

        spin_lock_irqsave(&chan->desc_lock, flags);

        if (list_empty(&chan->ld_running)) {
                dev_dbg(chan->dev, "no running descriptors\n");
                goto out_unlock;
        }

        /* Get the last descriptor, update the cookie to that */
        desc = to_fsl_desc(chan->ld_running.prev);
        if (dma_is_idle(chan))
                cookie = desc->async_tx.cookie;
        else {
                cookie = desc->async_tx.cookie - 1;
                if (unlikely(cookie < DMA_MIN_COOKIE))
                        cookie = DMA_MAX_COOKIE;
        }

        chan->completed_cookie = cookie;

out_unlock:
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
                                          struct fsl_desc_sw *desc)
{
        return dma_async_is_complete(desc->async_tx.cookie,
                                     chan->completed_cookie,
                                     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
        struct fsl_desc_sw *desc, *_desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);

        dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
        list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
                        break;

                /* Remove from the list of running transactions */
                list_del(&desc->node);

                /* Run the link descriptor callback function */
                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
                        spin_unlock_irqrestore(&chan->desc_lock, flags);
                        dev_dbg(chan->dev, "LD %p callback\n", desc);
                        callback(callback_param);
                        spin_lock_irqsave(&chan->desc_lock, flags);
                }

                /* Run any dependencies, then free the descriptor */
                dma_run_dependencies(&desc->async_tx);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }

        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
        struct fsl_desc_sw *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);

        /*
         * If the list of pending descriptors is empty, then we
         * don't need to do any work at all
         */
        if (list_empty(&chan->ld_pending)) {
                dev_dbg(chan->dev, "no pending LDs\n");
                goto out_unlock;
        }

        /*
         * The DMA controller is not idle, which means the interrupt
         * handler will start any queued transactions when it runs
         * at the end of the current transaction
         */
        if (!dma_is_idle(chan)) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                goto out_unlock;
        }

        /*
         * TODO:
         * make sure the dma_halt() function really un-wedges the
         * controller as much as possible
         */
        dma_halt(chan);

        /*
         * There are some link descriptors which have not been
         * transferred, so we need to start the controller. Move all
         * elements from the queue of pending transactions onto the
         * list of running transactions.
         */
        desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
        list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

        /*
         * Program the descriptor's address into the DMA controller,
         * then start the DMA transaction
         */
        set_cdar(chan, desc->async_tx.phys);
        dma_start(chan);

out_unlock:
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
        struct fsldma_chan *chan = to_fsl_chan(dchan);
        fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct fsldma_chan *chan = to_fsl_chan(dchan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        fsl_chan_ld_cleanup(chan);

        last_used = dchan->cookie;
        last_complete = chan->completed_cookie;

        dma_set_tx_state(txstate, last_complete, last_used, 0);

        return dma_async_is_complete(cookie, last_complete, last_used);
}
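
/*
 * Illustrative sketch (not part of this driver): a client polls for
 * completion of a submitted cookie; dma_async_is_tx_complete() ends up
 * calling fsl_tx_status() above. Names are the generic dmaengine ones.
 *
 *	enum dma_status status;
 *	dma_cookie_t last, used;
 *
 *	do {
 *		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
 *	} while (status == DMA_IN_PROGRESS);
 */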

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
        struct fsldma_chan *chan = data;
        int update_cookie = 0;
        int xfer_ld_q = 0;
        u32 stat;

        /* save and clear the status register */
        stat = get_sr(chan);
        set_sr(chan, stat);
        dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);

        stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
        if (!stat)
                return IRQ_NONE;

        if (stat & FSL_DMA_SR_TE)
                dev_err(chan->dev, "Transfer Error!\n");

        /*
         * Programming Error
         * The DMA_INTERRUPT async_tx is a NULL transfer, which will
         * trigger a PE interrupt.
         */
        if (stat & FSL_DMA_SR_PE) {
                dev_dbg(chan->dev, "irq: Programming Error INT\n");
                if (get_bcr(chan) == 0) {
                        /*
                         * The BCR register is 0, so this is a DMA_INTERRUPT
                         * async_tx. Now, update the completed cookie, and
                         * continue the next uncompleted transfer.
                         */
                        update_cookie = 1;
                        xfer_ld_q = 1;
                }
                stat &= ~FSL_DMA_SR_PE;
        }

        /*
         * If the link descriptor segment transfer finishes,
         * we will recycle the used descriptor.
         */
        if (stat & FSL_DMA_SR_EOSI) {
                dev_dbg(chan->dev, "irq: End-of-segments INT\n");
                dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
                        (unsigned long long)get_cdar(chan),
                        (unsigned long long)get_ndar(chan));
                stat &= ~FSL_DMA_SR_EOSI;
                update_cookie = 1;
        }

        /*
         * For the MPC8349, the EOCDI event needs to update the cookie
         * and start the next transfer if one exists.
         */
        if (stat & FSL_DMA_SR_EOCDI) {
                dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
                stat &= ~FSL_DMA_SR_EOCDI;
                update_cookie = 1;
                xfer_ld_q = 1;
        }

        /*
         * If the current transfer is the end-of-transfer,
         * we should clear the Channel Start bit to
         * prepare for the next transfer.
         */
        if (stat & FSL_DMA_SR_EOLNI) {
                dev_dbg(chan->dev, "irq: End-of-link INT\n");
                stat &= ~FSL_DMA_SR_EOLNI;
                xfer_ld_q = 1;
        }

        if (update_cookie)
                fsl_dma_update_completed_cookie(chan);
        if (xfer_ld_q)
                fsl_chan_xfer_ld_queue(chan);
        if (stat)
                dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);

        dev_dbg(chan->dev, "irq: Exit\n");
        tasklet_schedule(&chan->tasklet);
        return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
        struct fsldma_chan *chan = (struct fsldma_chan *)data;
        fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
        struct fsldma_device *fdev = data;
        struct fsldma_chan *chan;
        unsigned int handled = 0;
        u32 gsr, mask;
        int i;

        gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
                                                   : in_le32(fdev->regs);
        mask = 0xff000000;
        dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

        for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
                chan = fdev->chan[i];
                if (!chan)
                        continue;

                if (gsr & mask) {
                        dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
                        fsldma_chan_irq(irq, chan);
                        handled++;
                }

                gsr &= ~mask;
                mask >>= 8;
        }

        return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
        struct fsldma_chan *chan;
        int i;

        if (fdev->irq != NO_IRQ) {
                dev_dbg(fdev->dev, "free per-controller IRQ\n");
                free_irq(fdev->irq, fdev);
                return;
        }

        for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
                chan = fdev->chan[i];
                if (chan && chan->irq != NO_IRQ) {
                        dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
                        free_irq(chan->irq, chan);
                }
        }
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/* channel i's IRQ was never requested, so unwind from i - 1 */
	for (i--; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's features differ from the features of its
	 * channels, something is wrong; report it.
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
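	/*
	 * Derive the channel index from its register offset: the channel
	 * register blocks are 0x80 bytes apart starting at offset 0x100,
	 * so masking to the controller's 4K region and shifting right by
	 * 7 (0x80 == 1 << 7) recovers the id.
	 */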
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also support the 83xx ops */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op,
			const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every
	 * DMA channel object; see the illustrative device tree fragment
	 * below for the layout this loop expects.
	 */
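	/*
	 * For reference, a controller node and its channel children look
	 * roughly like the sketch below. This is an illustration only:
	 * the addresses, interrupt specifiers, and chip-specific
	 * compatible strings vary by board and are examples, not values
	 * taken from this driver.
	 *
	 *	dma@21300 {
	 *		#address-cells = <1>;
	 *		#size-cells = <1>;
	 *		compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
	 *		reg = <0x21300 0x4>;
	 *		ranges = <0x0 0x21100 0x200>;
	 *
	 *		dma-channel@0 {
	 *			compatible = "fsl,mpc8548-dma-channel",
	 *				     "fsl,eloplus-dma-channel";
	 *			reg = <0x0 0x80>;
	 *			interrupts = <20 2>;
	 *		};
	 *	};
	 */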
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s).
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts, to reduce the number of shared interrupt
	 * handlers on the same IRQ line.
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}
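
/*
 * For context, a minimal dmaengine client of this controller looks
 * roughly like the sketch below. This illustrates the generic dmaengine
 * API and is not part of this driver; "dst", "src" and "len" are
 * assumed to be a DMA-mapped destination, source and length the client
 * already owns, and error handling is omitted.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	... wait, e.g. by polling dma_async_is_tx_complete() ...
 *	dma_release_channel(chan);
 */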

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
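
/*
 * Boards typically list a chip-specific compatible string first (e.g.
 * "fsl,mpc8548-dma") and fall back to the generic strings matched above.
 */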

static struct of_platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

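/*
 * Registered at subsys_initcall time rather than module_init, so that
 * the DMA channels are available before most client drivers probe.
 */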
subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");