/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

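/*
 * pdc_enable_irq - gate the per-channel interrupt enable bit in CTL2.
 * Channels 0-7 use bits 0-7; channels 8-11 sit above the start bits
 * (DMA_CTL2_START_SHIFT_BITS), at chan_id + 8, i.e. bits 16-19.
 */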
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

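/*
 * pdc_set_dir - program the channel's transfer direction bit.  Each
 * channel owns a 4-bit mode/direction field in CTL0 (channels 0-7) or
 * CTL3 (channels 8-11).  The DMA_MASK_CTL*_MODE handling writes the
 * mode fields of every other channel back as 0b11, which the hardware
 * takes as "leave unchanged", so this read-modify-write does not
 * disturb channels that are already running.
 */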
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

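/*
 * pdc_set_mode - program the channel's mode field (DISABLE/SG/ONESHOT)
 * in CTL0 (channels 0-7) or CTL3 (channels 8-11).  As in pdc_set_dir(),
 * every other channel's mode field is written back as 0b11 so in-flight
 * channels are left untouched.
 */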
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

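/*
 * Channel status lives in STS0 for channels 0-7 and in STS2 for
 * channels 8-11: two bits per channel starting at bit 16, holding one
 * of the DMA_STATUS_* values defined above.
 */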
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	if (sts == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}

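/*
 * pdc_dostart - kick a transfer on an idle channel.  A single
 * descriptor is written straight into the channel registers and run in
 * one-shot mode; a chained list instead loads the first descriptor's
 * DMA address into NEXT and lets the hardware walk the chain in
 * scatter-gather mode.
 */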
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

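/*
 * pdc_advance_work - called with the channel idle: either retire all
 * finished descriptors and start whatever was queued, or complete just
 * the head of the active list and restart from the new head.
 */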
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

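/*
 * pd_tx_submit - dmaengine tx_submit hook: assign a cookie and either
 * start the descriptor immediately (idle channel) or park it on the
 * queue for pdc_advance_work() to pick up later.
 */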
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

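/*
 * pdc_desc_get - take the first ACKed descriptor off the free list, or
 * fall back to allocating a fresh one from the PCI pool when every
 * recycled descriptor is still awaiting its ACK.
 */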
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	chan->completed_cookie = chan->cookie = 1;
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_irq(&pd_chan->lock);
	last_completed = chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_irq(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

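/*
 * pd_prep_slave_sg - build a descriptor chain for a slave transfer.
 * The peripheral's FIFO address and transfer width come from the
 * pch_dma_slave the client hung off chan->private.  Each hardware
 * descriptor encodes width and count in its size field, and the low
 * bits of ->next select chain/end behaviour and whether an IRQ is
 * raised on completion.
 */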
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

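/*
 * pd_irq - shared interrupt handler.  STS0 carries the IRQ and error
 * bits for channels 0-7, STS2 for channels 8-11; any flagged channel
 * has its error latched in err_status and its tasklet scheduled, and
 * the handled status bits are written back to acknowledge them.
 */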
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif

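/*
 * pch_dma_probe - PCI probe.  The channel count comes from the
 * driver_data of the matched ID-table entry; BAR 1 holds the control
 * registers followed by the per-channel descriptor registers that each
 * pch_dma_chan points into via ->membase.
 */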
static int __devinit pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		pd_chan->chan.cookie = 1;

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_disable(&pd_chan->tasklet);
			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		free_irq(pdev->irq, pd);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");