/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

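/*
 * Descriptor lifecycle, as implemented below: a desc normally moves
 * FREE (in the DMAC pool) -> PREP (pluck_desc()/prep_xxx) -> BUSY (once
 * pl330_submit_req() accepts it in fill_queue()) -> DONE (marked from
 * dma_pl330_rqcb()) and is finally spliced back into the pool, i.e.
 * FREE again, by pl330_tasklet()/free_desc_list().
 */
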
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->chan.completed_cookie = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

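/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * driver): pl330_filter() is meant to be passed to dma_request_channel()
 * so that a client gets a channel wired to the wanted peripheral:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 *
 * where peri_id would be one of the u8 ids the platform exposes via
 * struct dma_pl330_platdata (see pl330_probe() below).
 */
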
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	chan->completed_cookie = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->chan.completed_cookie = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}

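/*
 * A minimal configuration sketch (hypothetical client code, assuming
 * fifo_addr is the physical address of the peripheral FIFO): the fields
 * consumed by the DMA_SLAVE_CONFIG case above are the device-side
 * address, bus width and maxburst.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * Note that burst_len is stored here but the slave prep routines below
 * currently program brst_len = 1.
 */
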
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached, we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

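/*
 * Worked example for get_burst_len() (illustrative numbers, not tied to
 * any particular SoC): with an 8-byte data bus, a 16-entry data buffer
 * and brst_size = 2 (4-byte beats), the starting value is
 * (8 * 16) >> 2 = 32, which the cap reduces to the architectural maximum
 * of 16. The loop then steps down until (burst_len << brst_size) divides
 * len: a 1024-byte request keeps 16, while a 1020-byte one ends at 15,
 * since 15 * 4 = 60 divides 1020.
 */
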
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

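/*
 * Note on the cyclic path above: only one descriptor, covering a single
 * period, is created. Each time it completes, pl330_tasklet() hands it
 * to handle_cyclic_desc_list(), which invokes the client callback,
 * resets the status to PREP and splices the desc back onto work_list,
 * so the same transfer is resubmitted until the channel is terminated
 * or freed.
 */
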
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

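/*
 * A minimal slave-transfer sketch (hypothetical client code, assuming a
 * DMA-mapped scatterlist sgl with sg_len entries and a channel already
 * configured via DMA_SLAVE_CONFIG; xfer_done and ctx are placeholders):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = xfer_done;
 *		tx->callback_param = ctx;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */
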
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err2;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err3;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err4;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err5;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err5;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err5:
	pl330_del(pi);
probe_err4:
	free_irq(irq, pi);
probe_err3:
#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");