Linus Walleij8d318a52010-03-30 15:33:42 +02001/*
2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
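/*
 * Worked example (editorial note, not in the original source): the channel
 * activation status registers hold one 2-bit state per channel, with even
 * numbered channels in D40_DREG_ACTIVE and odd numbered ones in
 * D40_DREG_ACTIVO.  For physical channel 7:
 *
 *	D40_CHAN_POS(7)      == 2 * (7 / 2) == 6
 *	D40_CHAN_POS_MASK(7) == 0x3 << 6    == 0xC0
 *
 * so its state is extracted as
 *
 *	status = (readl(active_reg) & 0xC0) >> 6;
 *
 * which is the pattern used by d40_channel_execute_command() below.
 */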
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
37/* The number of free d40_desc to keep in memory before starting
38 * to kfree() them */
39#define D40_DESC_CACHE_SIZE 50
40
41/* Hardware designer of the block */
42#define D40_PERIPHID2_DESIGNER 0x8
43
44/**
45 * enum d40_command - The different commands and/or statuses.
46 *
47 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
48 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
49 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
50 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
51 */
52enum d40_command {
53 D40_DMA_STOP = 0,
54 D40_DMA_RUN = 1,
55 D40_DMA_SUSPEND_REQ = 2,
56 D40_DMA_SUSPENDED = 3
57};
58
59/**
60 * struct d40_lli_pool - Structure for keeping LLIs in memory
61 *
62 * @base: Pointer to a memory area used when the pre_alloc_lli area is not
63 * large enough, i.e. the job is bigger than the most common case of one dst
64 * and one src. NULL if pre_alloc_lli is used.
65 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
66 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
67 * one buffer to one buffer.
68 */
69struct d40_lli_pool {
70 void *base;
71 int size;
72 /* Space for dst and src, plus an extra for padding */
73 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
74};
75
76/**
77 * struct d40_desc - A descriptor is one DMA job.
78 *
79 * @lli_phy: LLI settings for physical channel. Both src and dst
80 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
81 * lli_len equals one.
82 * @lli_log: Same as above but for logical channels.
83 * @lli_pool: The pool with two entries pre-allocated.
Per Friden941b77a2010-06-20 21:24:45 +000084 * @lli_len: Number of llis of current descriptor.
85 * @lli_count: Number of transferred llis.
86 * @lli_tx_len: Max number of LLIs per transfer; there can be
87 * many transfers for one descriptor.
Linus Walleij8d318a52010-03-30 15:33:42 +020088 * @txd: DMA engine struct. Used for among other things for communication
89 * during a transfer.
90 * @node: List entry.
91 * @dir: The transfer direction of this job.
92 * @is_in_client_list: true if the client owns this descriptor.
93 *
94 * This descriptor is used for both logical and physical transfers.
95 */
96
97struct d40_desc {
98 /* LLI physical */
99 struct d40_phy_lli_bidir lli_phy;
100 /* LLI logical */
101 struct d40_log_lli_bidir lli_log;
102
103 struct d40_lli_pool lli_pool;
Per Friden941b77a2010-06-20 21:24:45 +0000104 int lli_len;
105 int lli_count;
106 u32 lli_tx_len;
Linus Walleij8d318a52010-03-30 15:33:42 +0200107
108 struct dma_async_tx_descriptor txd;
109 struct list_head node;
110
111 enum dma_data_direction dir;
112 bool is_in_client_list;
113};
114
115/**
116 * struct d40_lcla_pool - LCLA pool settings and data.
117 *
118 * @base: The virtual address of LCLA.
119 * @phy: Physical base address of LCLA.
120 * @base_size: size of lcla.
121 * @lock: Lock to protect the content in this struct.
122 * @alloc_map: Mapping between physical channel and LCLA entries.
123 * @num_blocks: The number of entries in alloc_map. Equal to the
124 * number of physical channels.
125 */
126struct d40_lcla_pool {
127 void *base;
128 dma_addr_t phy;
129 resource_size_t base_size;
130 spinlock_t lock;
131 u32 *alloc_map;
132 int num_blocks;
133};
134
135/**
136 * struct d40_phy_res - struct for handling eventlines mapped to physical
137 * channels.
138 *
139 * @lock: A lock protecting this entity.
140 * @num: The physical channel number of this entity.
141 * @allocated_src: Bit mapped to show which src event lines are mapped to
142 * this physical channel. Can also be free or physically allocated.
143 * @allocated_dst: Same as for src but for dst.
144 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
145 * the event line number. Both allocated_src and allocated_dst can not be
146 * allocated to a physical channel, since the interrupt handler would then
147 * have no way of figuring out which one the interrupt belongs to.
148 */
149struct d40_phy_res {
150 spinlock_t lock;
151 int num;
152 u32 allocated_src;
153 u32 allocated_dst;
154};
155
156struct d40_base;
157
158/**
159 * struct d40_chan - Struct that describes a channel.
160 *
161 * @lock: A spinlock to protect this struct.
162 * @log_num: The logical channel number, if any, of this channel.
163 * @completed: Starts with 1, after first interrupt it is set to dma engine's
164 * current cookie.
165 * @pending_tx: The number of pending transfers. Used between interrupt handler
166 * and tasklet.
167 * @busy: Set to true when transfer is ongoing on this channel.
168 * @phy_chan: Pointer to physical channel which this instance runs on.
169 * @chan: DMA engine handle.
170 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
171 * transfer and call client callback.
172 * @client: Client owned descriptor list.
173 * @active: Active descriptor.
174 * @queue: Queued jobs.
175 * @free: List of free descriptors, ready to be reused.
176 * @free_len: Number of descriptors in the free list.
177 * @dma_cfg: The client configuration of this dma channel.
178 * @base: Pointer to the device instance struct.
179 * @src_def_cfg: Default cfg register setting for src.
180 * @dst_def_cfg: Default cfg register setting for dst.
181 * @log_def: Default logical channel settings.
182 * @lcla: Space for one dst src pair for logical channel transfers.
183 * @lcpa: Pointer to dst and src lcpa settings.
184 *
185 * This struct can either "be" a logical or a physical channel.
186 */
187struct d40_chan {
188 spinlock_t lock;
189 int log_num;
190 /* ID of the most recent completed transfer */
191 int completed;
192 int pending_tx;
193 bool busy;
194 struct d40_phy_res *phy_chan;
195 struct dma_chan chan;
196 struct tasklet_struct tasklet;
197 struct list_head client;
198 struct list_head active;
199 struct list_head queue;
200 struct list_head free;
201 int free_len;
202 struct stedma40_chan_cfg dma_cfg;
203 struct d40_base *base;
204 /* Default register configurations */
205 u32 src_def_cfg;
206 u32 dst_def_cfg;
207 struct d40_def_lcsp log_def;
208 struct d40_lcla_elem lcla;
209 struct d40_log_lli_full *lcpa;
210};
211
212/**
213 * struct d40_base - The big global struct, one for each probe'd instance.
214 *
215 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
216 * @execmd_lock: Lock for execute command usage since several channels share
217 * the same physical register.
218 * @dev: The device structure.
219 * @virtbase: The virtual base address of the DMA controller's registers.
220 * @clk: Pointer to the DMA clock structure.
221 * @phy_start: Physical memory start of the DMA registers.
222 * @phy_size: Size of the DMA register map.
223 * @irq: The IRQ number.
224 * @num_phy_chans: The number of physical channels. Read from HW. This
225 * is the number of available channels for this driver, not counting "Secure
226 * mode" allocated physical channels.
227 * @num_log_chans: The number of logical channels. Calculated from
228 * num_phy_chans.
229 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
230 * @dma_slave: dma_device channels that can only do slave transfers.
231 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
232 * @phy_chans: Room for all possible physical channels in system.
233 * @log_chans: Room for all possible logical channels in system.
234 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
235 * to log_chans entries.
236 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
237 * to phy_chans entries.
238 * @plat_data: Pointer to provided platform_data which is the driver
239 * configuration.
240 * @phy_res: Vector containing all physical channels.
241 * @lcla_pool: lcla pool settings and data.
242 * @lcpa_base: The virtual mapped address of LCPA.
243 * @phy_lcpa: The physical address of the LCPA.
244 * @lcpa_size: The size of the LCPA area.
245 */
246struct d40_base {
247 spinlock_t interrupt_lock;
248 spinlock_t execmd_lock;
249 struct device *dev;
250 void __iomem *virtbase;
251 struct clk *clk;
252 phys_addr_t phy_start;
253 resource_size_t phy_size;
254 int irq;
255 int num_phy_chans;
256 int num_log_chans;
257 struct dma_device dma_both;
258 struct dma_device dma_slave;
259 struct dma_device dma_memcpy;
260 struct d40_chan *phy_chans;
261 struct d40_chan *log_chans;
262 struct d40_chan **lookup_log_chans;
263 struct d40_chan **lookup_phy_chans;
264 struct stedma40_platform_data *plat_data;
265 /* Physical half channels */
266 struct d40_phy_res *phy_res;
267 struct d40_lcla_pool lcla_pool;
268 void *lcpa_base;
269 dma_addr_t phy_lcpa;
270 resource_size_t lcpa_size;
271};
272
273/**
274 * struct d40_interrupt_lookup - lookup table for interrupt handler
275 *
276 * @src: Interrupt mask register.
277 * @clr: Interrupt clear register.
278 * @is_error: true if this is an error interrupt.
279 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
280 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
281 */
282struct d40_interrupt_lookup {
283 u32 src;
284 u32 clr;
285 bool is_error;
286 int offset;
287};
288
289/**
290 * struct d40_reg_val - simple lookup struct
291 *
292 * @reg: The register.
293 * @val: The value that belongs to the register in reg.
294 */
295struct d40_reg_val {
296 unsigned int reg;
297 unsigned int val;
298};
299
300static int d40_pool_lli_alloc(struct d40_desc *d40d,
301 int lli_len, bool is_log)
302{
303 u32 align;
304 void *base;
305
306 if (is_log)
307 align = sizeof(struct d40_log_lli);
308 else
309 align = sizeof(struct d40_phy_lli);
310
311 if (lli_len == 1) {
312 base = d40d->lli_pool.pre_alloc_lli;
313 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
314 d40d->lli_pool.base = NULL;
315 } else {
316 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
317
318 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
319 d40d->lli_pool.base = base;
320
321 if (d40d->lli_pool.base == NULL)
322 return -ENOMEM;
323 }
324
325 if (is_log) {
326 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
327 align);
328 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
329 align);
330 } else {
331 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
332 align);
333 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
334 align);
335
336 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
337 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
338 }
339
340 return 0;
341}
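/*
 * Editorial note: d40_pool_lli_alloc() has two paths.  A single LLI job
 * (lli_len == 1) reuses the embedded pre_alloc_lli buffer so it cannot fail,
 * while longer jobs kmalloc() an aligned area with GFP_NOWAIT since
 * descriptors may be prepared from atomic context.  A typical caller in
 * this file looks like:
 *
 *	if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0)
 *		return -ENOMEM;
 *
 * and the area is later released with d40_pool_lli_free() when the
 * descriptor is recycled.
 */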
342
343static void d40_pool_lli_free(struct d40_desc *d40d)
344{
345 kfree(d40d->lli_pool.base);
346 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = 0;
348 d40d->lli_log.src = NULL;
349 d40d->lli_log.dst = NULL;
350 d40d->lli_phy.src = NULL;
351 d40d->lli_phy.dst = NULL;
352 d40d->lli_phy.src_addr = 0;
353 d40d->lli_phy.dst_addr = 0;
354}
355
356static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
357 struct d40_desc *desc)
358{
359 dma_cookie_t cookie = d40c->chan.cookie;
360
361 if (++cookie < 0)
362 cookie = 1;
363
364 d40c->chan.cookie = cookie;
365 desc->txd.cookie = cookie;
366
367 return cookie;
368}
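/*
 * Editorial note: dma_cookie_t is a signed int, so the "++cookie < 0" test
 * above wraps the cookie back to 1 on overflow; zero and negative values
 * are reserved by the dmaengine framework for error reporting.
 */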
369
Linus Walleij8d318a52010-03-30 15:33:42 +0200370static void d40_desc_remove(struct d40_desc *d40d)
371{
372 list_del(&d40d->node);
373}
374
375static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
376{
Linus Walleij8d318a52010-03-30 15:33:42 +0200377 struct d40_desc *d;
378 struct d40_desc *_d;
 struct d40_desc *desc = NULL;
379
380 if (!list_empty(&d40c->client)) {
381 list_for_each_entry_safe(d, _d, &d40c->client, node)
382 if (async_tx_test_ack(&d->txd)) {
383 d40_pool_lli_free(d);
384 d40_desc_remove(d);
385 desc = d;
386 goto out;
387 }
388 }
389
390 if (list_empty(&d40c->free)) {
391 /* Alloc new desc because we're out of used ones */
392 desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
393 if (desc == NULL)
394 goto out;
395 INIT_LIST_HEAD(&desc->node);
396 } else {
397 /* Reuse an old desc. */
398 desc = list_first_entry(&d40c->free,
399 struct d40_desc,
400 node);
401 list_del(&desc->node);
402 d40c->free_len--;
403 }
404out:
405 return desc;
406}
407
408static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
409{
410 if (d40c->free_len < D40_DESC_CACHE_SIZE) {
411 list_add_tail(&d40d->node, &d40c->free);
412 d40c->free_len++;
413 } else
414 kfree(d40d);
415}
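/*
 * Editorial note: instead of freeing every descriptor, up to
 * D40_DESC_CACHE_SIZE (50) of them are kept on the per-channel free list
 * and handed out again by d40_desc_get(), avoiding a kzalloc()/kfree()
 * round trip for every transfer.
 */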
416
417static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
418{
419 list_add_tail(&desc->node, &d40c->active);
420}
421
422static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
423{
424 struct d40_desc *d;
425
426 if (list_empty(&d40c->active))
427 return NULL;
428
429 d = list_first_entry(&d40c->active,
430 struct d40_desc,
431 node);
432 return d;
433}
434
435static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
436{
437 list_add_tail(&desc->node, &d40c->queue);
438}
439
440static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
441{
442 struct d40_desc *d;
443
444 if (list_empty(&d40c->queue))
445 return NULL;
446
447 d = list_first_entry(&d40c->queue,
448 struct d40_desc,
449 node);
450 return d;
451}
452
453/* Support functions for logical channels */
454
455static int d40_lcla_id_get(struct d40_chan *d40c,
456 struct d40_lcla_pool *pool)
457{
458 int src_id = 0;
459 int dst_id = 0;
460 struct d40_log_lli *lcla_lidx_base =
461 pool->base + d40c->phy_chan->num * 1024;
462 int i;
463 int lli_per_log = d40c->base->plat_data->llis_per_log;
464
465 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
466 return 0;
467
468 if (pool->num_blocks > 32)
469 return -EINVAL;
470
471 spin_lock(&pool->lock);
472
473 for (i = 0; i < pool->num_blocks; i++) {
474 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
475 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
476 break;
477 }
478 }
479 src_id = i;
480 if (src_id >= pool->num_blocks)
481 goto err;
482
483 for (; i < pool->num_blocks; i++) {
484 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
485 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
486 break;
487 }
488 }
489
490 dst_id = i;
491 if (dst_id == src_id)
492 goto err;
493
494 d40c->lcla.src_id = src_id;
495 d40c->lcla.dst_id = dst_id;
496 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
497 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
498
499
500 spin_unlock(&pool->lock);
501 return 0;
502err:
503 spin_unlock(&pool->lock);
504 return -EINVAL;
505}
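/*
 * Editorial note: each physical channel owns a 1024 byte slice of the LCLA
 * area, divided into pool->num_blocks (at most 32) blocks tracked by one
 * bit each in pool->alloc_map[phy].  A logical transfer claims two distinct
 * blocks, one for the src LLI list and one for the dst list.  For example,
 * with alloc_map == 0x5 (blocks 0 and 2 busy) the loops above pick
 * src_id == 1 and dst_id == 3.
 */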
506
507static void d40_lcla_id_put(struct d40_chan *d40c,
508 struct d40_lcla_pool *pool,
509 int id)
510{
511 if (id < 0)
512 return;
513
514 d40c->lcla.src_id = -1;
515 d40c->lcla.dst_id = -1;
516
517 spin_lock(&pool->lock);
518 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
519 spin_unlock(&pool->lock);
520}
521
522static int d40_channel_execute_command(struct d40_chan *d40c,
523 enum d40_command command)
524{
525 int status, i;
526 void __iomem *active_reg;
527 int ret = 0;
528 unsigned long flags;
529
530 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
531
532 if (d40c->phy_chan->num % 2 == 0)
533 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
534 else
535 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
536
537 if (command == D40_DMA_SUSPEND_REQ) {
538 status = (readl(active_reg) &
539 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
540 D40_CHAN_POS(d40c->phy_chan->num);
541
542 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
543 goto done;
544 }
545
546 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
547
548 if (command == D40_DMA_SUSPEND_REQ) {
549
550 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
551 status = (readl(active_reg) &
552 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
553 D40_CHAN_POS(d40c->phy_chan->num);
554
555 cpu_relax();
556 /*
557 * Reduce the number of bus accesses while
558 * waiting for the DMA to suspend.
559 */
560 udelay(3);
561
562 if (status == D40_DMA_STOP ||
563 status == D40_DMA_SUSPENDED)
564 break;
565 }
566
567 if (i == D40_SUSPEND_MAX_IT) {
568 dev_err(&d40c->chan.dev->device,
569 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
570 __func__, d40c->phy_chan->num, d40c->log_num,
571 status);
572 dump_stack();
573 ret = -EBUSY;
574 }
575
576 }
577done:
578 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
579 return ret;
580}
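/*
 * Editorial note: a SUSPEND_REQ does not take effect immediately, so the
 * loop above re-reads the 2-bit channel state up to D40_SUSPEND_MAX_IT
 * (500) times with a 3 us delay per iteration, i.e. the DMAC gets roughly
 * 1.5 ms to reach SUSPENDED or STOP before -EBUSY is returned.
 */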
581
582static void d40_term_all(struct d40_chan *d40c)
583{
584 struct d40_desc *d40d;
Linus Walleij8d318a52010-03-30 15:33:42 +0200585
586 /* Release active descriptors */
587 while ((d40d = d40_first_active_get(d40c))) {
588 d40_desc_remove(d40d);
589
590 /* Return desc to free-list */
591 d40_desc_free(d40c, d40d);
592 }
593
594 /* Release queued descriptors waiting for transfer */
595 while ((d40d = d40_first_queued(d40c))) {
596 d40_desc_remove(d40d);
597
598 /* Return desc to free-list */
599 d40_desc_free(d40c, d40d);
600 }
601
Linus Walleij8d318a52010-03-30 15:33:42 +0200602 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
603 d40c->lcla.src_id);
604 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
605 d40c->lcla.dst_id);
606
607 d40c->pending_tx = 0;
608 d40c->busy = false;
609}
610
611static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
612{
613 u32 val;
614 unsigned long flags;
615
616 if (do_enable)
617 val = D40_ACTIVATE_EVENTLINE;
618 else
619 val = D40_DEACTIVATE_EVENTLINE;
620
621 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
622
623 /* Enable event line connected to device (or memcpy) */
624 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
625 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
626 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
627
628 writel((val << D40_EVENTLINE_POS(event)) |
629 ~D40_EVENTLINE_MASK(event),
630 d40c->base->virtbase + D40_DREG_PCBASE +
631 d40c->phy_chan->num * D40_DREG_PCDELTA +
632 D40_CHAN_REG_SSLNK);
633 }
634 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
635 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
636
637 writel((val << D40_EVENTLINE_POS(event)) |
638 ~D40_EVENTLINE_MASK(event),
639 d40c->base->virtbase + D40_DREG_PCBASE +
640 d40c->phy_chan->num * D40_DREG_PCDELTA +
641 D40_CHAN_REG_SDLNK);
642 }
643
644 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
645}
646
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200647static u32 d40_chan_has_events(struct d40_chan *d40c)
Linus Walleij8d318a52010-03-30 15:33:42 +0200648{
649 u32 val = 0;
650
651 /* If SSLNK or SDLNK is zero all events are disabled */
652 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
653 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
654 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
655 d40c->phy_chan->num * D40_DREG_PCDELTA +
656 D40_CHAN_REG_SSLNK);
657
658 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
659 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
660 d40c->phy_chan->num * D40_DREG_PCDELTA +
661 D40_CHAN_REG_SDLNK);
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200662 return val;
Linus Walleij8d318a52010-03-30 15:33:42 +0200663}
664
665static void d40_config_enable_lidx(struct d40_chan *d40c)
666{
667 /* Set LIDX for lcla */
668 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
669 D40_SREG_ELEM_LOG_LIDX_MASK,
670 d40c->base->virtbase + D40_DREG_PCBASE +
671 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
672
673 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
674 D40_SREG_ELEM_LOG_LIDX_MASK,
675 d40c->base->virtbase + D40_DREG_PCBASE +
676 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
677}
678
679static int d40_config_write(struct d40_chan *d40c)
680{
681 u32 addr_base;
682 u32 var;
683 int res;
684
685 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
686 if (res)
687 return res;
688
689 /* Odd addresses are even addresses + 4 */
690 addr_base = (d40c->phy_chan->num % 2) * 4;
691 /* Setup channel mode to logical or physical */
692 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
693 D40_CHAN_POS(d40c->phy_chan->num);
694 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
695
696 /* Setup operational mode option register */
697 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
698 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
699
700 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
701
702 if (d40c->log_num != D40_PHY_CHAN) {
703 /* Set default config for CFG reg */
704 writel(d40c->src_def_cfg,
705 d40c->base->virtbase + D40_DREG_PCBASE +
706 d40c->phy_chan->num * D40_DREG_PCDELTA +
707 D40_CHAN_REG_SSCFG);
708 writel(d40c->dst_def_cfg,
709 d40c->base->virtbase + D40_DREG_PCBASE +
710 d40c->phy_chan->num * D40_DREG_PCDELTA +
711 D40_CHAN_REG_SDCFG);
712
713 d40_config_enable_lidx(d40c);
714 }
715 return res;
716}
717
718static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
719{
720
721 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
722 d40_phy_lli_write(d40c->base->virtbase,
723 d40c->phy_chan->num,
724 d40d->lli_phy.dst,
725 d40d->lli_phy.src);
Linus Walleij8d318a52010-03-30 15:33:42 +0200726 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200727 struct d40_log_lli *src = d40d->lli_log.src;
728 struct d40_log_lli *dst = d40d->lli_log.dst;
729
Per Friden941b77a2010-06-20 21:24:45 +0000730 src += d40d->lli_count;
731 dst += d40d->lli_count;
Linus Walleij8d318a52010-03-30 15:33:42 +0200732 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
733 d40c->lcla.dst,
734 dst, src,
735 d40c->base->plat_data->llis_per_log);
736 }
Per Friden941b77a2010-06-20 21:24:45 +0000737 d40d->lli_count += d40d->lli_tx_len;
Linus Walleij8d318a52010-03-30 15:33:42 +0200738}
739
740static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
741{
742 struct d40_chan *d40c = container_of(tx->chan,
743 struct d40_chan,
744 chan);
745 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
746 unsigned long flags;
747
748 spin_lock_irqsave(&d40c->lock, flags);
749
750 tx->cookie = d40_assign_cookie(d40c, d40d);
751
752 d40_desc_queue(d40c, d40d);
753
754 spin_unlock_irqrestore(&d40c->lock, flags);
755
756 return tx->cookie;
757}
758
759static int d40_start(struct d40_chan *d40c)
760{
761 int err;
762
763 if (d40c->log_num != D40_PHY_CHAN) {
764 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
765 if (err)
766 return err;
767 d40_config_set_event(d40c, true);
768 }
769
770 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
771
772 return err;
773}
774
775static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
776{
777 struct d40_desc *d40d;
778 int err;
779
780 /* Start queued jobs, if any */
781 d40d = d40_first_queued(d40c);
782
783 if (d40d != NULL) {
784 d40c->busy = true;
785
786 /* Remove from queue */
787 d40_desc_remove(d40d);
788
789 /* Add to active queue */
790 d40_desc_submit(d40c, d40d);
791
792 /* Initiate DMA job */
793 d40_desc_load(d40c, d40d);
794
795 /* Start dma job */
796 err = d40_start(d40c);
797
798 if (err)
799 return NULL;
800 }
801
802 return d40d;
803}
804
805/* called from interrupt context */
806static void dma_tc_handle(struct d40_chan *d40c)
807{
808 struct d40_desc *d40d;
809
810 if (!d40c->phy_chan)
811 return;
812
813 /* Get first active entry from list */
814 d40d = d40_first_active_get(d40c);
815
816 if (d40d == NULL)
817 return;
818
Per Friden941b77a2010-06-20 21:24:45 +0000819 if (d40d->lli_count < d40d->lli_len) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200820
821 d40_desc_load(d40c, d40d);
822 /* Start dma job */
823 (void) d40_start(d40c);
824 return;
825 }
826
827 if (d40_queue_start(d40c) == NULL)
828 d40c->busy = false;
829
830 d40c->pending_tx++;
831 tasklet_schedule(&d40c->tasklet);
832
833}
834
835static void dma_tasklet(unsigned long data)
836{
837 struct d40_chan *d40c = (struct d40_chan *) data;
838 struct d40_desc *d40d_fin;
839 unsigned long flags;
840 dma_async_tx_callback callback;
841 void *callback_param;
842
843 spin_lock_irqsave(&d40c->lock, flags);
844
845 /* Get first active entry from list */
846 d40d_fin = d40_first_active_get(d40c);
847
848 if (d40d_fin == NULL)
849 goto err;
850
851 d40c->completed = d40d_fin->txd.cookie;
852
853 /*
854 * If terminating a channel, pending_tx is set to zero.
855 * This prevents any finished active jobs from being returned to the client.
856 */
857 if (d40c->pending_tx == 0) {
858 spin_unlock_irqrestore(&d40c->lock, flags);
859 return;
860 }
861
862 /* Callback to client */
863 callback = d40d_fin->txd.callback;
864 callback_param = d40d_fin->txd.callback_param;
865
866 if (async_tx_test_ack(&d40d_fin->txd)) {
867 d40_pool_lli_free(d40d_fin);
868 d40_desc_remove(d40d_fin);
869 /* Return desc to free-list */
870 d40_desc_free(d40c, d40d_fin);
871 } else {
Linus Walleij8d318a52010-03-30 15:33:42 +0200872 if (!d40d_fin->is_in_client_list) {
873 d40_desc_remove(d40d_fin);
874 list_add_tail(&d40d_fin->node, &d40c->client);
875 d40d_fin->is_in_client_list = true;
876 }
877 }
878
879 d40c->pending_tx--;
880
881 if (d40c->pending_tx)
882 tasklet_schedule(&d40c->tasklet);
883
884 spin_unlock_irqrestore(&d40c->lock, flags);
885
886 if (callback)
887 callback(callback_param);
888
889 return;
890
891 err:
892 /* Rescue manoeuvre if receiving double interrupts */
893 if (d40c->pending_tx > 0)
894 d40c->pending_tx--;
895 spin_unlock_irqrestore(&d40c->lock, flags);
896}
897
898static irqreturn_t d40_handle_interrupt(int irq, void *data)
899{
900 static const struct d40_interrupt_lookup il[] = {
901 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
902 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
903 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
904 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
905 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
906 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
907 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
908 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
909 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
910 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
911 };
912
913 int i;
914 u32 regs[ARRAY_SIZE(il)];
915 u32 tmp;
916 u32 idx;
917 u32 row;
918 long chan = -1;
919 struct d40_chan *d40c;
920 unsigned long flags;
921 struct d40_base *base = data;
922
923 spin_lock_irqsave(&base->interrupt_lock, flags);
924
925 /* Read interrupt status of both logical and physical channels */
926 for (i = 0; i < ARRAY_SIZE(il); i++)
927 regs[i] = readl(base->virtbase + il[i].src);
928
929 for (;;) {
930
931 chan = find_next_bit((unsigned long *)regs,
932 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
933
934 /* No more set bits found? */
935 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
936 break;
937
938 row = chan / BITS_PER_LONG;
939 idx = chan & (BITS_PER_LONG - 1);
940
941 /* ACK interrupt */
942 tmp = readl(base->virtbase + il[row].clr);
943 tmp |= 1 << idx;
944 writel(tmp, base->virtbase + il[row].clr);
945
946 if (il[row].offset == D40_PHY_CHAN)
947 d40c = base->lookup_phy_chans[idx];
948 else
949 d40c = base->lookup_log_chans[il[row].offset + idx];
950 spin_lock(&d40c->lock);
951
952 if (!il[row].is_error)
953 dma_tc_handle(d40c);
954 else
955 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
956 __func__, chan, il[row].offset, idx);
957
958 spin_unlock(&d40c->lock);
959 }
960
961 spin_unlock_irqrestore(&base->interrupt_lock, flags);
962
963 return IRQ_HANDLED;
964}
965
966
967static int d40_validate_conf(struct d40_chan *d40c,
968 struct stedma40_chan_cfg *conf)
969{
970 int res = 0;
971 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
972 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
973 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
974 == STEDMA40_CHANNEL_IN_LOG_MODE;
975
976 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
977 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
978 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
979 __func__);
980 res = -EINVAL;
981 }
982
983 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
984 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
985 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
986 __func__);
987 res = -EINVAL;
988 }
989
990 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
991 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
992 dev_err(&d40c->chan.dev->device,
993 "[%s] No event line\n", __func__);
994 res = -EINVAL;
995 }
996
997 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
998 (src_event_group != dst_event_group)) {
999 dev_err(&d40c->chan.dev->device,
1000 "[%s] Invalid event group\n", __func__);
1001 res = -EINVAL;
1002 }
1003
1004 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1005 /*
1006 * DMAC HW supports it. Will be added to this driver,
1007 * in case any dma client requires it.
1008 */
1009 dev_err(&d40c->chan.dev->device,
1010 "[%s] periph to periph not supported\n",
1011 __func__);
1012 res = -EINVAL;
1013 }
1014
1015 return res;
1016}
1017
1018static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001019 int log_event_line, bool is_log)
Linus Walleij8d318a52010-03-30 15:33:42 +02001020{
1021 unsigned long flags;
1022 spin_lock_irqsave(&phy->lock, flags);
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001023 if (!is_log) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001024 /* Physical interrupts are masked per physical full channel */
1025 if (phy->allocated_src == D40_ALLOC_FREE &&
1026 phy->allocated_dst == D40_ALLOC_FREE) {
1027 phy->allocated_dst = D40_ALLOC_PHY;
1028 phy->allocated_src = D40_ALLOC_PHY;
1029 goto found;
1030 } else
1031 goto not_found;
1032 }
1033
1034 /* Logical channel */
1035 if (is_src) {
1036 if (phy->allocated_src == D40_ALLOC_PHY)
1037 goto not_found;
1038
1039 if (phy->allocated_src == D40_ALLOC_FREE)
1040 phy->allocated_src = D40_ALLOC_LOG_FREE;
1041
1042 if (!(phy->allocated_src & (1 << log_event_line))) {
1043 phy->allocated_src |= 1 << log_event_line;
1044 goto found;
1045 } else
1046 goto not_found;
1047 } else {
1048 if (phy->allocated_dst == D40_ALLOC_PHY)
1049 goto not_found;
1050
1051 if (phy->allocated_dst == D40_ALLOC_FREE)
1052 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1053
1054 if (!(phy->allocated_dst & (1 << log_event_line))) {
1055 phy->allocated_dst |= 1 << log_event_line;
1056 goto found;
1057 } else
1058 goto not_found;
1059 }
1060
1061not_found:
1062 spin_unlock_irqrestore(&phy->lock, flags);
1063 return false;
1064found:
1065 spin_unlock_irqrestore(&phy->lock, flags);
1066 return true;
1067}
1068
1069static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1070 int log_event_line)
1071{
1072 unsigned long flags;
1073 bool is_free = false;
1074
1075 spin_lock_irqsave(&phy->lock, flags);
1076 if (!log_event_line) {
1077 /* Physical interrupts are masked per physical full channel */
1078 phy->allocated_dst = D40_ALLOC_FREE;
1079 phy->allocated_src = D40_ALLOC_FREE;
1080 is_free = true;
1081 goto out;
1082 }
1083
1084 /* Logical channel */
1085 if (is_src) {
1086 phy->allocated_src &= ~(1 << log_event_line);
1087 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1088 phy->allocated_src = D40_ALLOC_FREE;
1089 } else {
1090 phy->allocated_dst &= ~(1 << log_event_line);
1091 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1092 phy->allocated_dst = D40_ALLOC_FREE;
1093 }
1094
1095 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1096 D40_ALLOC_FREE);
1097
1098out:
1099 spin_unlock_irqrestore(&phy->lock, flags);
1100
1101 return is_free;
1102}
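/*
 * Editorial note on the allocation encoding: allocated_src/allocated_dst
 * hold D40_ALLOC_FREE (1 << 31) when the half channel is completely unused,
 * D40_ALLOC_PHY (1 << 30) when it is taken as a physical channel, and
 * otherwise a bitmap of the logical event lines in use.  Allocating event
 * line 3 on a free half channel therefore goes
 * D40_ALLOC_FREE -> D40_ALLOC_LOG_FREE (0) -> 0x8 in d40_alloc_mask_set(),
 * and d40_alloc_mask_free() reverses the steps back to D40_ALLOC_FREE.
 */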
1103
1104static int d40_allocate_channel(struct d40_chan *d40c)
1105{
1106 int dev_type;
1107 int event_group;
1108 int event_line;
1109 struct d40_phy_res *phys;
1110 int i;
1111 int j;
1112 int log_num;
1113 bool is_src;
1114 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1115 == STEDMA40_CHANNEL_IN_LOG_MODE;
1116
1117
1118 phys = d40c->base->phy_res;
1119
1120 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1121 dev_type = d40c->dma_cfg.src_dev_type;
1122 log_num = 2 * dev_type;
1123 is_src = true;
1124 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1125 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1126 /* dst event lines are used for logical memcpy */
1127 dev_type = d40c->dma_cfg.dst_dev_type;
1128 log_num = 2 * dev_type + 1;
1129 is_src = false;
1130 } else
1131 return -EINVAL;
1132
1133 event_group = D40_TYPE_TO_GROUP(dev_type);
1134 event_line = D40_TYPE_TO_EVENT(dev_type);
1135
1136 if (!is_log) {
1137 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1138 /* Find physical half channel */
1139 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1140
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001141 if (d40_alloc_mask_set(&phys[i], is_src,
1142 0, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001143 goto found_phy;
1144 }
1145 } else
1146 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1147 int phy_num = j + event_group * 2;
1148 for (i = phy_num; i < phy_num + 2; i++) {
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001149 if (d40_alloc_mask_set(&phys[i], is_src,
1150 0, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001151 goto found_phy;
1152 }
1153 }
1154 return -EINVAL;
1155found_phy:
1156 d40c->phy_chan = &phys[i];
1157 d40c->log_num = D40_PHY_CHAN;
1158 goto out;
1159 }
1160 if (dev_type == -1)
1161 return -EINVAL;
1162
1163 /* Find logical channel */
1164 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1165 int phy_num = j + event_group * 2;
1166 /*
1167 * Spread logical channels across all available physical channels
1168 * rather than packing every logical channel onto the first available
1169 * phy channel.
1170 */
1171 if (is_src) {
1172 for (i = phy_num; i < phy_num + 2; i++) {
1173 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001174 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001175 goto found_log;
1176 }
1177 } else {
1178 for (i = phy_num + 1; i >= phy_num; i--) {
1179 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001180 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001181 goto found_log;
1182 }
1183 }
1184 }
1185 return -EINVAL;
1186
1187found_log:
1188 d40c->phy_chan = &phys[i];
1189 d40c->log_num = log_num;
1190out:
1191
1192 if (is_log)
1193 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1194 else
1195 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1196
1197 return 0;
1198
1199}
1200
Linus Walleij8d318a52010-03-30 15:33:42 +02001201static int d40_config_memcpy(struct d40_chan *d40c)
1202{
1203 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1204
1205 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1206 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1207 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1208 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1209 memcpy[d40c->chan.chan_id];
1210
1211 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1212 dma_has_cap(DMA_SLAVE, cap)) {
1213 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1214 } else {
1215 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1216 __func__);
1217 return -EINVAL;
1218 }
1219
1220 return 0;
1221}
1222
1223
1224static int d40_free_dma(struct d40_chan *d40c)
1225{
1226
1227 int res = 0;
1228 u32 event, dir;
1229 struct d40_phy_res *phy = d40c->phy_chan;
1230 bool is_src;
Per Fridena8be8622010-06-20 21:24:59 +00001231 struct d40_desc *d;
1232 struct d40_desc *_d;
1233
Linus Walleij8d318a52010-03-30 15:33:42 +02001234
1235 /* Terminate all queued and active transfers */
1236 d40_term_all(d40c);
1237
Per Fridena8be8622010-06-20 21:24:59 +00001238 /* Release client owned descriptors */
1239 if (!list_empty(&d40c->client))
1240 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1241 d40_pool_lli_free(d);
1242 d40_desc_remove(d);
1243 /* Return desc to free-list */
1244 d40_desc_free(d40c, d);
1245 }
1246
Linus Walleij8d318a52010-03-30 15:33:42 +02001247 if (phy == NULL) {
1248 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1249 __func__);
1250 return -EINVAL;
1251 }
1252
1253 if (phy->allocated_src == D40_ALLOC_FREE &&
1254 phy->allocated_dst == D40_ALLOC_FREE) {
1255 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1256 __func__);
1257 return -EINVAL;
1258 }
1259
1260
1261 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1262 if (res) {
1263 dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
1264 __func__);
1265 return res;
1266 }
1267
1268 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1269 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1270 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1271 dir = D40_CHAN_REG_SDLNK;
1272 is_src = false;
1273 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1274 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1275 dir = D40_CHAN_REG_SSLNK;
1276 is_src = true;
1277 } else {
1278 dev_err(&d40c->chan.dev->device,
1279 "[%s] Unknown direction\n", __func__);
1280 return -EINVAL;
1281 }
1282
1283 if (d40c->log_num != D40_PHY_CHAN) {
1284 /*
1285 * Release logical channel, deactivate the event line during
1286 * the time physical res is suspended.
1287 */
1288 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1289 D40_EVENTLINE_MASK(event),
1290 d40c->base->virtbase + D40_DREG_PCBASE +
1291 phy->num * D40_DREG_PCDELTA + dir);
1292
1293 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1294
1295 /*
1296 * Check if there are more logical allocations
1297 * on this phy channel.
1298 */
1299 if (!d40_alloc_mask_free(phy, is_src, event)) {
1300 /* Resume the other logical channels if any */
1301 if (d40_chan_has_events(d40c)) {
1302 res = d40_channel_execute_command(d40c,
1303 D40_DMA_RUN);
1304 if (res) {
1305 dev_err(&d40c->chan.dev->device,
1306 "[%s] Executing RUN command\n",
1307 __func__);
1308 return res;
1309 }
1310 }
1311 return 0;
1312 }
1313 } else
1314 d40_alloc_mask_free(phy, is_src, 0);
1315
1316 /* Release physical channel */
1317 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1318 if (res) {
1319 dev_err(&d40c->chan.dev->device,
1320 "[%s] Failed to stop channel\n", __func__);
1321 return res;
1322 }
1323 d40c->phy_chan = NULL;
1324 /* Invalidate channel type */
1325 d40c->dma_cfg.channel_type = 0;
1326 d40c->base->lookup_phy_chans[phy->num] = NULL;
1327
1328 return 0;
1329
1330
1331}
1332
1333static int d40_pause(struct dma_chan *chan)
1334{
1335 struct d40_chan *d40c =
1336 container_of(chan, struct d40_chan, chan);
1337 int res;
1338
1339 unsigned long flags;
1340
1341 spin_lock_irqsave(&d40c->lock, flags);
1342
1343 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1344 if (res == 0) {
1345 if (d40c->log_num != D40_PHY_CHAN) {
1346 d40_config_set_event(d40c, false);
1347 /* Resume the other logical channels if any */
1348 if (d40_chan_has_events(d40c))
1349 res = d40_channel_execute_command(d40c,
1350 D40_DMA_RUN);
1351 }
1352 }
1353
1354 spin_unlock_irqrestore(&d40c->lock, flags);
1355 return res;
1356}
1357
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001358static bool d40_is_paused(struct d40_chan *d40c)
1359{
1360 bool is_paused = false;
1361 unsigned long flags;
1362 void __iomem *active_reg;
1363 u32 status;
1364 u32 event;
1365 int res;
1366
1367 spin_lock_irqsave(&d40c->lock, flags);
1368
1369 if (d40c->log_num == D40_PHY_CHAN) {
1370 if (d40c->phy_chan->num % 2 == 0)
1371 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1372 else
1373 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1374
1375 status = (readl(active_reg) &
1376 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1377 D40_CHAN_POS(d40c->phy_chan->num);
1378 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1379 is_paused = true;
1380
1381 goto _exit;
1382 }
1383
1384 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1385 if (res != 0)
1386 goto _exit;
1387
1388 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1389 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1390 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1391 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1392 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1393 else {
1394 dev_err(&d40c->chan.dev->device,
1395 "[%s] Unknown direction\n", __func__);
1396 goto _exit;
1397 }
1398 status = d40_chan_has_events(d40c);
1399 status = (status & D40_EVENTLINE_MASK(event)) >>
1400 D40_EVENTLINE_POS(event);
1401
1402 if (status != D40_DMA_RUN)
1403 is_paused = true;
1404
1405 /* Resume the other logical channels if any */
1406 if (d40_chan_has_events(d40c))
1407 res = d40_channel_execute_command(d40c,
1408 D40_DMA_RUN);
1409
1410_exit:
1411 spin_unlock_irqrestore(&d40c->lock, flags);
1412 return is_paused;
1413
1414}
1415
1416
Linus Walleij8d318a52010-03-30 15:33:42 +02001417static bool d40_tx_is_linked(struct d40_chan *d40c)
1418{
1419 bool is_link;
1420
1421 if (d40c->log_num != D40_PHY_CHAN)
1422 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1423 else
1424 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1425 d40c->phy_chan->num * D40_DREG_PCDELTA +
1426 D40_CHAN_REG_SDLNK) &
1427 D40_SREG_LNK_PHYS_LNK_MASK;
1428 return is_link;
1429}
1430
1431static u32 d40_residue(struct d40_chan *d40c)
1432{
1433 u32 num_elt;
1434
1435 if (d40c->log_num != D40_PHY_CHAN)
1436 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1437 >> D40_MEM_LCSP2_ECNT_POS;
1438 else
1439 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1440 d40c->phy_chan->num * D40_DREG_PCDELTA +
1441 D40_CHAN_REG_SDELT) &
1442 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1443 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1444}
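/*
 * Editorial note: the hardware reports remaining elements (ECNT), not
 * bytes, so the residue is converted by multiplying with the element size,
 * 1 << data_width bytes.  Assuming the STEDMA40_*_WIDTH values encode the
 * log2 of the element size, 32-bit elements (data_width == 2) and
 * ECNT == 10 give a residue of 40 bytes.
 */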
1445
1446static int d40_resume(struct dma_chan *chan)
1447{
1448 struct d40_chan *d40c =
1449 container_of(chan, struct d40_chan, chan);
1450 int res = 0;
1451 unsigned long flags;
1452
1453 spin_lock_irqsave(&d40c->lock, flags);
1454
1455 if (d40c->log_num != D40_PHY_CHAN) {
1456 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1457 if (res)
1458 goto out;
1459
1460 /* If bytes left to transfer or linked tx resume job */
1461 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1462 d40_config_set_event(d40c, true);
1463 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1464 }
1465 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1466 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1467
1468out:
1469 spin_unlock_irqrestore(&d40c->lock, flags);
1470 return res;
1471}
1472
1473static u32 stedma40_residue(struct dma_chan *chan)
1474{
1475 struct d40_chan *d40c =
1476 container_of(chan, struct d40_chan, chan);
1477 u32 bytes_left;
1478 unsigned long flags;
1479
1480 spin_lock_irqsave(&d40c->lock, flags);
1481 bytes_left = d40_residue(d40c);
1482 spin_unlock_irqrestore(&d40c->lock, flags);
1483
1484 return bytes_left;
1485}
1486
1487/* Public DMA functions in addition to the DMA engine framework */
1488
1489int stedma40_set_psize(struct dma_chan *chan,
1490 int src_psize,
1491 int dst_psize)
1492{
1493 struct d40_chan *d40c =
1494 container_of(chan, struct d40_chan, chan);
1495 unsigned long flags;
1496
1497 spin_lock_irqsave(&d40c->lock, flags);
1498
1499 if (d40c->log_num != D40_PHY_CHAN) {
1500 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1501 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1502 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1503 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1504 goto out;
1505 }
1506
1507 if (src_psize == STEDMA40_PSIZE_PHY_1)
1508 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1509 else {
1510 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1511 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1512 D40_SREG_CFG_PSIZE_POS);
1513 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1514 }
1515
1516 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1517 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1518 else {
1519 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1520 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1521 D40_SREG_CFG_PSIZE_POS);
1522 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1523 }
1524out:
1525 spin_unlock_irqrestore(&d40c->lock, flags);
1526 return 0;
1527}
1528EXPORT_SYMBOL(stedma40_set_psize);
1529
1530struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1531 struct scatterlist *sgl_dst,
1532 struct scatterlist *sgl_src,
1533 unsigned int sgl_len,
1534 unsigned long flags)
1535{
1536 int res;
1537 struct d40_desc *d40d;
1538 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1539 chan);
1540 unsigned long flg;
Linus Walleij8d318a52010-03-30 15:33:42 +02001541
1542
1543 spin_lock_irqsave(&d40c->lock, flg);
1544 d40d = d40_desc_get(d40c);
1545
1546 if (d40d == NULL)
1547 goto err;
1548
1549 memset(d40d, 0, sizeof(struct d40_desc));
1550 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001551 d40d->lli_tx_len = d40d->lli_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001552 d40d->txd.flags = flags;
1553
1554 if (d40c->log_num != D40_PHY_CHAN) {
Per Friden941b77a2010-06-20 21:24:45 +00001555 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1556 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1557
Linus Walleij8d318a52010-03-30 15:33:42 +02001558 if (sgl_len > 1)
1559 /*
1560 * Check if there is space available in lcla. If not,
1561 * split list into 1-length and run only in lcpa
1562 * space.
1563 */
1564 if (d40_lcla_id_get(d40c,
1565 &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001566 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001567
1568 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1569 dev_err(&d40c->chan.dev->device,
1570 "[%s] Out of memory\n", __func__);
1571 goto err;
1572 }
1573
1574 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1575 sgl_src,
1576 sgl_len,
1577 d40d->lli_log.src,
1578 d40c->log_def.lcsp1,
1579 d40c->dma_cfg.src_info.data_width,
Per Friden941b77a2010-06-20 21:24:45 +00001580 flags & DMA_PREP_INTERRUPT,
1581 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001582 d40c->base->plat_data->llis_per_log);
1583
1584 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1585 sgl_dst,
1586 sgl_len,
1587 d40d->lli_log.dst,
1588 d40c->log_def.lcsp3,
1589 d40c->dma_cfg.dst_info.data_width,
Per Friden941b77a2010-06-20 21:24:45 +00001590 flags & DMA_PREP_INTERRUPT,
1591 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001592 d40c->base->plat_data->llis_per_log);
1593
1594
1595 } else {
1596 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1597 dev_err(&d40c->chan.dev->device,
1598 "[%s] Out of memory\n", __func__);
1599 goto err;
1600 }
1601
1602 res = d40_phy_sg_to_lli(sgl_src,
1603 sgl_len,
1604 0,
1605 d40d->lli_phy.src,
1606 d40d->lli_phy.src_addr,
1607 d40c->src_def_cfg,
1608 d40c->dma_cfg.src_info.data_width,
1609 d40c->dma_cfg.src_info.psize,
1610 true);
1611
1612 if (res < 0)
1613 goto err;
1614
1615 res = d40_phy_sg_to_lli(sgl_dst,
1616 sgl_len,
1617 0,
1618 d40d->lli_phy.dst,
1619 d40d->lli_phy.dst_addr,
1620 d40c->dst_def_cfg,
1621 d40c->dma_cfg.dst_info.data_width,
1622 d40c->dma_cfg.dst_info.psize,
1623 true);
1624
1625 if (res < 0)
1626 goto err;
1627
1628 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1629 d40d->lli_pool.size, DMA_TO_DEVICE);
1630 }
1631
1632 dma_async_tx_descriptor_init(&d40d->txd, chan);
1633
1634 d40d->txd.tx_submit = d40_tx_submit;
1635
1636 spin_unlock_irqrestore(&d40c->lock, flg);
1637
1638 return &d40d->txd;
1639err:
1640 spin_unlock_irqrestore(&d40c->lock, flg);
1641 return NULL;
1642}
1643EXPORT_SYMBOL(stedma40_memcpy_sg);
1644
1645bool stedma40_filter(struct dma_chan *chan, void *data)
1646{
1647 struct stedma40_chan_cfg *info = data;
1648 struct d40_chan *d40c =
1649 container_of(chan, struct d40_chan, chan);
1650 int err;
1651
1652 if (data) {
1653 err = d40_validate_conf(d40c, info);
1654 if (!err)
1655 d40c->dma_cfg = *info;
1656 } else
1657 err = d40_config_memcpy(d40c);
1658
1659 return err == 0;
1660}
1661EXPORT_SYMBOL(stedma40_filter);
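/*
 * Client side usage sketch (editorial illustration only; the peripheral
 * device type below is hypothetical and normally comes from platform data):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir		= STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type	= MY_PERIPH_DEV_TYPE,
 *		.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
 *		// channel_type also selects logical/physical mode, priority etc.
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *	if (!chan)
 *		return -EBUSY;	// no channel matched the configuration
 */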
1662
1663/* DMA ENGINE functions */
1664static int d40_alloc_chan_resources(struct dma_chan *chan)
1665{
1666 int err;
1667 unsigned long flags;
1668 struct d40_chan *d40c =
1669 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001670 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001671 spin_lock_irqsave(&d40c->lock, flags);
1672
1673 d40c->completed = chan->cookie = 1;
1674
1675 /*
1676 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001677 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001678 */
1679 if (d40c->dma_cfg.channel_type == 0) {
1680 err = d40_config_memcpy(d40c);
1681 if (err)
1682 goto err_alloc;
1683 }
Linus Walleijef1872e2010-06-20 21:24:52 +00001684 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001685
1686 err = d40_allocate_channel(d40c);
1687 if (err) {
1688 dev_err(&d40c->chan.dev->device,
1689 "[%s] Failed to allocate channel\n", __func__);
1690 goto err_alloc;
1691 }
1692
Linus Walleijef1872e2010-06-20 21:24:52 +00001693 /* Fill in basic CFG register values */
1694 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1695 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1696
1697 if (d40c->log_num != D40_PHY_CHAN) {
1698 d40_log_cfg(&d40c->dma_cfg,
1699 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1700
1701 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1702 d40c->lcpa = d40c->base->lcpa_base +
1703 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1704 else
1705 d40c->lcpa = d40c->base->lcpa_base +
1706 d40c->dma_cfg.dst_dev_type *
1707 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1708 }
1709
1710 /*
1711 * Only write channel configuration to the DMA if the physical
1712 * resource is free. In case of multiple logical channels
1713 * on the same physical resource, only the first write is necessary.
1714 */
1715 if (is_free_phy) {
1716 err = d40_config_write(d40c);
1717 if (err) {
1718 dev_err(&d40c->chan.dev->device,
1719 "[%s] Failed to configure channel\n",
1720 __func__);
1721 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001722 }
1723
1724 spin_unlock_irqrestore(&d40c->lock, flags);
1725 return 0;
1726
1727 err_config:
1728 (void) d40_free_dma(d40c);
1729 err_alloc:
1730 spin_unlock_irqrestore(&d40c->lock, flags);
1731 dev_err(&d40c->chan.dev->device,
1732 "[%s] Channel allocation failed\n", __func__);
1733 return -EINVAL;
1734}
1735
1736static void d40_free_chan_resources(struct dma_chan *chan)
1737{
1738 struct d40_chan *d40c =
1739 container_of(chan, struct d40_chan, chan);
1740 int err;
1741 unsigned long flags;
1742
1743 spin_lock_irqsave(&d40c->lock, flags);
1744
1745 err = d40_free_dma(d40c);
1746
1747 if (err)
1748 dev_err(&d40c->chan.dev->device,
1749 "[%s] Failed to free channel\n", __func__);
1750 spin_unlock_irqrestore(&d40c->lock, flags);
1751}
1752
1753static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1754 dma_addr_t dst,
1755 dma_addr_t src,
1756 size_t size,
1757 unsigned long flags)
1758{
1759 struct d40_desc *d40d;
1760 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1761 chan);
1762 unsigned long flg;
1763 int err = 0;
1764
1765 spin_lock_irqsave(&d40c->lock, flg);
1766 d40d = d40_desc_get(d40c);
1767
1768 if (d40d == NULL) {
1769 dev_err(&d40c->chan.dev->device,
1770 "[%s] Descriptor is NULL\n", __func__);
1771 goto err;
1772 }
1773
1774 memset(d40d, 0, sizeof(struct d40_desc));
1775
1776 d40d->txd.flags = flags;
1777
1778 dma_async_tx_descriptor_init(&d40d->txd, chan);
1779
1780 d40d->txd.tx_submit = d40_tx_submit;
1781
1782 if (d40c->log_num != D40_PHY_CHAN) {
1783
1784 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1785 dev_err(&d40c->chan.dev->device,
1786 "[%s] Out of memory\n", __func__);
1787 goto err;
1788 }
1789 d40d->lli_len = 1;
Per Friden941b77a2010-06-20 21:24:45 +00001790 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001791
1792 d40_log_fill_lli(d40d->lli_log.src,
1793 src,
1794 size,
1795 0,
1796 d40c->log_def.lcsp1,
1797 d40c->dma_cfg.src_info.data_width,
1798 true, true);
1799
1800 d40_log_fill_lli(d40d->lli_log.dst,
1801 dst,
1802 size,
1803 0,
1804 d40c->log_def.lcsp3,
1805 d40c->dma_cfg.dst_info.data_width,
1806 true, true);
1807
1808 } else {
1809
1810 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1811 dev_err(&d40c->chan.dev->device,
1812 "[%s] Out of memory\n", __func__);
1813 goto err;
1814 }
1815
1816 err = d40_phy_fill_lli(d40d->lli_phy.src,
1817 src,
1818 size,
1819 d40c->dma_cfg.src_info.psize,
1820 0,
1821 d40c->src_def_cfg,
1822 true,
1823 d40c->dma_cfg.src_info.data_width,
1824 false);
1825 if (err)
1826 goto err_fill_lli;
1827
1828 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1829 dst,
1830 size,
1831 d40c->dma_cfg.dst_info.psize,
1832 0,
1833 d40c->dst_def_cfg,
1834 true,
1835 d40c->dma_cfg.dst_info.data_width,
1836 false);
1837
1838 if (err)
1839 goto err_fill_lli;
1840
1841 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1842 d40d->lli_pool.size, DMA_TO_DEVICE);
1843 }
1844
1845 spin_unlock_irqrestore(&d40c->lock, flg);
1846 return &d40d->txd;
1847
1848err_fill_lli:
1849 dev_err(&d40c->chan.dev->device,
1850 "[%s] Failed filling in PHY LLI\n", __func__);
1851 d40_pool_lli_free(d40d);
1852err:
1853 spin_unlock_irqrestore(&d40c->lock, flg);
1854 return NULL;
1855}
1856
1857static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1858 struct d40_chan *d40c,
1859 struct scatterlist *sgl,
1860 unsigned int sg_len,
1861 enum dma_data_direction direction,
1862 unsigned long flags)
1863{
1864 dma_addr_t dev_addr = 0;
1865 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001866
1867 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1868 dev_err(&d40c->chan.dev->device,
1869 "[%s] Out of memory\n", __func__);
1870 return -ENOMEM;
1871 }
1872
1873 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001874 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1875 d40d->lli_tx_len = d40d->lli_len;
1876 else
1877 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001878
1879 if (sg_len > 1)
1880 /*
1881 * Check if there is space available in lcla.
1882 * If not, split list into 1-length and run only
1883 * in lcpa space.
1884 */
1885 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001886 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001887
1888 if (direction == DMA_FROM_DEVICE) {
1889 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1890 total_size = d40_log_sg_to_dev(&d40c->lcla,
1891 sgl, sg_len,
1892 &d40d->lli_log,
1893 &d40c->log_def,
1894 d40c->dma_cfg.src_info.data_width,
1895 d40c->dma_cfg.dst_info.data_width,
1896 direction,
1897 flags & DMA_PREP_INTERRUPT,
Per Friden941b77a2010-06-20 21:24:45 +00001898 dev_addr, d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001899 d40c->base->plat_data->llis_per_log);
1900 } else if (direction == DMA_TO_DEVICE) {
1901 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1902 total_size = d40_log_sg_to_dev(&d40c->lcla,
1903 sgl, sg_len,
1904 &d40d->lli_log,
1905 &d40c->log_def,
1906 d40c->dma_cfg.src_info.data_width,
1907 d40c->dma_cfg.dst_info.data_width,
1908 direction,
1909 flags & DMA_PREP_INTERRUPT,
Per Friden941b77a2010-06-20 21:24:45 +00001910 dev_addr, d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001911 d40c->base->plat_data->llis_per_log);
1912 } else
1913 return -EINVAL;
1914 if (total_size < 0)
1915 return -EINVAL;
1916
1917 return 0;
1918}
1919
1920static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1921 struct d40_chan *d40c,
1922 struct scatterlist *sgl,
1923 unsigned int sgl_len,
1924 enum dma_data_direction direction,
1925 unsigned long flags)
1926{
1927 dma_addr_t src_dev_addr;
1928 dma_addr_t dst_dev_addr;
1929 int res;
1930
1931 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1932 dev_err(&d40c->chan.dev->device,
1933 "[%s] Out of memory\n", __func__);
1934 return -ENOMEM;
1935 }
1936
1937 d40d->lli_len = sgl_len;
Per Friden 941b77a 2010-06-20 21:24:45 +0000 1938	d40d->lli_tx_len = sgl_len;
Linus Walleij 8d318a5 2010-03-30 15:33:42 +0200 1939
1940 if (direction == DMA_FROM_DEVICE) {
1941 dst_dev_addr = 0;
1942 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1943 } else if (direction == DMA_TO_DEVICE) {
1944 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1945 src_dev_addr = 0;
1946 } else
1947 return -EINVAL;
1948
1949 res = d40_phy_sg_to_lli(sgl,
1950 sgl_len,
1951 src_dev_addr,
1952 d40d->lli_phy.src,
1953 d40d->lli_phy.src_addr,
1954 d40c->src_def_cfg,
1955 d40c->dma_cfg.src_info.data_width,
1956 d40c->dma_cfg.src_info.psize,
1957 true);
1958 if (res < 0)
1959 return res;
1960
1961 res = d40_phy_sg_to_lli(sgl,
1962 sgl_len,
1963 dst_dev_addr,
1964 d40d->lli_phy.dst,
1965 d40d->lli_phy.dst_addr,
1966 d40c->dst_def_cfg,
1967 d40c->dma_cfg.dst_info.data_width,
1968 d40c->dma_cfg.dst_info.psize,
1969 true);
1970 if (res < 0)
1971 return res;
1972
1973 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1974 d40d->lli_pool.size, DMA_TO_DEVICE);
1975 return 0;
1976}
1977
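/*
 * dmaengine device_prep_slave_sg entry point. Runs the optional
 * pre_transfer hook, grabs a free descriptor and dispatches to the
 * logical or physical prep routine depending on the channel type,
 * then initializes the async_tx descriptor.
 */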
1978static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1979 struct scatterlist *sgl,
1980 unsigned int sg_len,
1981 enum dma_data_direction direction,
1982 unsigned long flags)
1983{
1984 struct d40_desc *d40d;
1985 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1986 chan);
1987 unsigned long flg;
1988 int err;
1989
1990 if (d40c->dma_cfg.pre_transfer)
1991 d40c->dma_cfg.pre_transfer(chan,
1992 d40c->dma_cfg.pre_transfer_data,
1993 sg_dma_len(sgl));
1994
1995 spin_lock_irqsave(&d40c->lock, flg);
1996 d40d = d40_desc_get(d40c);
1997 spin_unlock_irqrestore(&d40c->lock, flg);
1998
1999 if (d40d == NULL)
2000 return NULL;
2001
2002 memset(d40d, 0, sizeof(struct d40_desc));
2003
2004 if (d40c->log_num != D40_PHY_CHAN)
2005 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2006 direction, flags);
2007 else
2008 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2009 direction, flags);
2010 if (err) {
2011 dev_err(&d40c->chan.dev->device,
2012 "[%s] Failed to prepare %s slave sg job: %d\n",
2013 __func__,
2014 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2015 return NULL;
2016 }
2017
2018 d40d->txd.flags = flags;
2019
2020 dma_async_tx_descriptor_init(&d40d->txd, chan);
2021
2022 d40d->txd.tx_submit = d40_tx_submit;
2023
2024 return &d40d->txd;
2025}
2026
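/*
 * Report the transfer status for a cookie: DMA_PAUSED if the channel is
 * paused, otherwise the generic cookie comparison; the residue is
 * reported through dma_set_tx_state().
 */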
2027static enum dma_status d40_tx_status(struct dma_chan *chan,
2028 dma_cookie_t cookie,
2029 struct dma_tx_state *txstate)
2030{
2031 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2032 dma_cookie_t last_used;
2033 dma_cookie_t last_complete;
2034 int ret;
2035
2036 last_complete = d40c->completed;
2037 last_used = chan->cookie;
2038
Jonas Aaberg a5ebca4 2010-05-18 00:41:09 +0200 2039	if (d40_is_paused(d40c))
2040 ret = DMA_PAUSED;
2041 else
2042 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij 8d318a5 2010-03-30 15:33:42 +0200 2043
Jonas Aaberg a5ebca4 2010-05-18 00:41:09 +0200 2044	dma_set_tx_state(txstate, last_complete, last_used,
2045 stedma40_residue(chan));
Linus Walleij 8d318a5 2010-03-30 15:33:42 +0200 2046
2047 return ret;
2048}
2049
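/* Start execution of queued jobs unless the channel is already busy. */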
2050static void d40_issue_pending(struct dma_chan *chan)
2051{
2052 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&d40c->lock, flags);
2056
2057 /* Busy means that pending jobs are already being processed */
2058 if (!d40c->busy)
2059 (void) d40_queue_start(d40c);
2060
2061 spin_unlock_irqrestore(&d40c->lock, flags);
2062}
2063
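/*
 * dmaengine device_control entry point: supports DMA_TERMINATE_ALL,
 * DMA_PAUSE and DMA_RESUME; any other command returns -ENXIO.
 */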
Linus Walleij 0582763 2010-05-17 16:30:42 -0700 2064 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2065 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002066{
2067 unsigned long flags;
2068 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2069
2070 switch (cmd) {
2071 case DMA_TERMINATE_ALL:
2072 spin_lock_irqsave(&d40c->lock, flags);
2073 d40_term_all(d40c);
2074 spin_unlock_irqrestore(&d40c->lock, flags);
2075 return 0;
2076 case DMA_PAUSE:
2077 return d40_pause(chan);
2078 case DMA_RESUME:
2079 return d40_resume(chan);
2080 }
2081
2082 /* Other commands are unimplemented */
2083 return -ENXIO;
2084}
2085
2086/* Initialization functions */
2087
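/*
 * Set up a range of channel structures for one dma_device: give each
 * channel its default (physical) state, initialize its lock, lists and
 * tasklet, and add it to the dma_device channel list.
 */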
2088static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2089 struct d40_chan *chans, int offset,
2090 int num_chans)
2091{
2092 int i = 0;
2093 struct d40_chan *d40c;
2094
2095 INIT_LIST_HEAD(&dma->channels);
2096
2097 for (i = offset; i < offset + num_chans; i++) {
2098 d40c = &chans[i];
2099 d40c->base = base;
2100 d40c->chan.device = dma;
2101
2102 /* Invalidate lcla element */
2103 d40c->lcla.src_id = -1;
2104 d40c->lcla.dst_id = -1;
2105
2106 spin_lock_init(&d40c->lock);
2107
2108 d40c->log_num = D40_PHY_CHAN;
2109
2110 INIT_LIST_HEAD(&d40c->free);
2111 INIT_LIST_HEAD(&d40c->active);
2112 INIT_LIST_HEAD(&d40c->queue);
2113 INIT_LIST_HEAD(&d40c->client);
2114
2115 d40c->free_len = 0;
2116
2117 tasklet_init(&d40c->tasklet, dma_tasklet,
2118 (unsigned long) d40c);
2119
2120 list_add_tail(&d40c->chan.device_node,
2121 &dma->channels);
2122 }
2123}
2124
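/*
 * Register the three dmaengine devices: a slave-only device on the
 * logical channels, a memcpy-only device on the channels reserved for
 * memcpy, and a combined slave/memcpy device on the physical channels.
 */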
2125static int __init d40_dmaengine_init(struct d40_base *base,
2126 int num_reserved_chans)
2127{
2128	int err;
2129
2130 d40_chan_init(base, &base->dma_slave, base->log_chans,
2131 0, base->num_log_chans);
2132
2133 dma_cap_zero(base->dma_slave.cap_mask);
2134 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2135
2136 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2137 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2138 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2139 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2140 base->dma_slave.device_tx_status = d40_tx_status;
2141 base->dma_slave.device_issue_pending = d40_issue_pending;
2142 base->dma_slave.device_control = d40_control;
2143 base->dma_slave.dev = base->dev;
2144
2145 err = dma_async_device_register(&base->dma_slave);
2146
2147 if (err) {
2148 dev_err(base->dev,
2149 "[%s] Failed to register slave channels\n",
2150 __func__);
2151 goto failure1;
2152 }
2153
2154 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2155 base->num_log_chans, base->plat_data->memcpy_len);
2156
2157 dma_cap_zero(base->dma_memcpy.cap_mask);
2158 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2159
2160 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2161 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2162 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2163 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2164 base->dma_memcpy.device_tx_status = d40_tx_status;
2165 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2166 base->dma_memcpy.device_control = d40_control;
2167 base->dma_memcpy.dev = base->dev;
2168 /*
2169	 * This controller can only access addresses at even
2170	 * 32-bit boundaries, i.e. with 2^2 byte alignment.
2171 */
2172 base->dma_memcpy.copy_align = 2;
2173
2174 err = dma_async_device_register(&base->dma_memcpy);
2175
2176 if (err) {
2177 dev_err(base->dev,
2178			"[%s] Failed to register memcpy only channels\n",
2179 __func__);
2180 goto failure2;
2181 }
2182
2183 d40_chan_init(base, &base->dma_both, base->phy_chans,
2184 0, num_reserved_chans);
2185
2186 dma_cap_zero(base->dma_both.cap_mask);
2187 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2188 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2189
2190 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2191 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2192 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2193 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2194 base->dma_both.device_tx_status = d40_tx_status;
2195 base->dma_both.device_issue_pending = d40_issue_pending;
2196 base->dma_both.device_control = d40_control;
2197 base->dma_both.dev = base->dev;
2198 base->dma_both.copy_align = 2;
2199 err = dma_async_device_register(&base->dma_both);
2200
2201 if (err) {
2202 dev_err(base->dev,
2203 "[%s] Failed to register logical and physical capable channels\n",
2204 __func__);
2205 goto failure3;
2206 }
2207 return 0;
2208failure3:
2209 dma_async_device_unregister(&base->dma_memcpy);
2210failure2:
2211 dma_async_device_unregister(&base->dma_slave);
2212failure1:
2213 return err;
2214}
2215
2216/* Initialization functions. */
2217
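/*
 * Read the PRSME/PRSMO registers, mark channels reserved for secure use
 * only as permanently allocated, warn about channels whose PRTYP mode
 * setting looks misconfigured, and return the number of physical
 * channels available to this driver.
 */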
2218static int __init d40_phy_res_init(struct d40_base *base)
2219{
2220 int i;
2221 int num_phy_chans_avail = 0;
2222 u32 val[2];
2223 int odd_even_bit = -2;
2224
2225 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2226 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2227
2228 for (i = 0; i < base->num_phy_chans; i++) {
2229 base->phy_res[i].num = i;
2230 odd_even_bit += 2 * ((i % 2) == 0);
2231 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2232 /* Mark security only channels as occupied */
2233 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2234 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2235 } else {
2236 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2237 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2238 num_phy_chans_avail++;
2239 }
2240 spin_lock_init(&base->phy_res[i].lock);
2241 }
2242 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2243 num_phy_chans_avail, base->num_phy_chans);
2244
2245 /* Verify settings extended vs standard */
2246 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2247
2248 for (i = 0; i < base->num_phy_chans; i++) {
2249
2250 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2251 (val[0] & 0x3) != 1)
2252 dev_info(base->dev,
2253 "[%s] INFO: channel %d is misconfigured (%d)\n",
2254 __func__, i, val[0] & 0x3);
2255
2256 val[0] = val[0] >> 2;
2257 }
2258
2259 return num_phy_chans_avail;
2260}
2261
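/*
 * Detect the DMAC: enable its clock, map the register window, verify the
 * peripheral/PCell ID registers and the designer field, read out the
 * number of physical channels, and allocate the d40_base bookkeeping
 * structures. Returns NULL (with resources released) on any failure.
 */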
2262static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2263{
2264 static const struct d40_reg_val dma_id_regs[] = {
2265 /* Peripheral Id */
2266 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2267 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2268 /*
2269 * D40_DREG_PERIPHID2 Depends on HW revision:
2270 * MOP500/HREF ED has 0x0008,
2271 * ? has 0x0018,
2272 * HREF V1 has 0x0028
2273 */
2274 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2275
2276 /* PCell Id */
2277 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2278 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2279 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2280 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2281 };
2282 struct stedma40_platform_data *plat_data;
2283 struct clk *clk = NULL;
2284 void __iomem *virtbase = NULL;
2285 struct resource *res = NULL;
2286 struct d40_base *base = NULL;
2287 int num_log_chans = 0;
2288 int num_phy_chans;
2289 int i;
2290
2291 clk = clk_get(&pdev->dev, NULL);
2292
2293 if (IS_ERR(clk)) {
2294 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2295 __func__);
2296 goto failure;
2297 }
2298
2299 clk_enable(clk);
2300
2301 /* Get IO for DMAC base address */
2302 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2303 if (!res)
2304 goto failure;
2305
2306 if (request_mem_region(res->start, resource_size(res),
2307 D40_NAME " I/O base") == NULL)
2308 goto failure;
2309
2310 virtbase = ioremap(res->start, resource_size(res));
2311 if (!virtbase)
2312 goto failure;
2313
2314 /* HW version check */
2315 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2316 if (dma_id_regs[i].val !=
2317 readl(virtbase + dma_id_regs[i].reg)) {
2318 dev_err(&pdev->dev,
2319 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2320 __func__,
2321 dma_id_regs[i].val,
2322 dma_id_regs[i].reg,
2323 readl(virtbase + dma_id_regs[i].reg));
2324 goto failure;
2325 }
2326 }
2327
2328 i = readl(virtbase + D40_DREG_PERIPHID2);
2329
2330 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2331 dev_err(&pdev->dev,
2332 "[%s] Unknown designer! Got %x wanted %x\n",
2333 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2334 goto failure;
2335 }
2336
2337 /* The number of physical channels on this HW */
2338 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2339
2340 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2341 (i >> 4) & 0xf, res->start);
2342
2343 plat_data = pdev->dev.platform_data;
2344
2345 /* Count the number of logical channels in use */
2346 for (i = 0; i < plat_data->dev_len; i++)
2347 if (plat_data->dev_rx[i] != 0)
2348 num_log_chans++;
2349
2350 for (i = 0; i < plat_data->dev_len; i++)
2351 if (plat_data->dev_tx[i] != 0)
2352 num_log_chans++;
2353
2354 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2355 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2356 sizeof(struct d40_chan), GFP_KERNEL);
2357
2358 if (base == NULL) {
2359 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2360 goto failure;
2361 }
2362
2363 base->clk = clk;
2364 base->num_phy_chans = num_phy_chans;
2365 base->num_log_chans = num_log_chans;
2366 base->phy_start = res->start;
2367 base->phy_size = resource_size(res);
2368 base->virtbase = virtbase;
2369 base->plat_data = plat_data;
2370 base->dev = &pdev->dev;
2371 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2372 base->log_chans = &base->phy_chans[num_phy_chans];
2373
2374 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2375 GFP_KERNEL);
2376 if (!base->phy_res)
2377 goto failure;
2378
2379 base->lookup_phy_chans = kzalloc(num_phy_chans *
2380 sizeof(struct d40_chan *),
2381 GFP_KERNEL);
2382 if (!base->lookup_phy_chans)
2383 goto failure;
2384
2385 if (num_log_chans + plat_data->memcpy_len) {
2386 /*
2387		 * The max number of logical channels equals the number of
2388		 * event lines for all src and dst devices.
2389 */
2390 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2391 sizeof(struct d40_chan *),
2392 GFP_KERNEL);
2393 if (!base->lookup_log_chans)
2394 goto failure;
2395 }
2396 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2397 GFP_KERNEL);
2398 if (!base->lcla_pool.alloc_map)
2399 goto failure;
2400
2401 return base;
2402
2403failure:
2404	if (!IS_ERR_OR_NULL(clk)) {
2405 clk_disable(clk);
2406 clk_put(clk);
2407 }
2408 if (virtbase)
2409 iounmap(virtbase);
2410 if (res)
2411 release_mem_region(res->start,
2412 resource_size(res));
2415
2416 if (base) {
2417 kfree(base->lcla_pool.alloc_map);
2418 kfree(base->lookup_log_chans);
2419 kfree(base->lookup_phy_chans);
2420 kfree(base->phy_res);
2421 kfree(base);
2422 }
2423
2424 return NULL;
2425}
2426
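/*
 * Program the power-on defaults: clock every part of the block, unmask
 * and clear interrupts for all logical channels, put the non-secure
 * physical channels into physical mode and enable/clear their interrupts.
 */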
2427static void __init d40_hw_init(struct d40_base *base)
2428{
2429
2430 static const struct d40_reg_val dma_init_reg[] = {
2431 /* Clock every part of the DMA block from start */
2432 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2433
2434 /* Interrupts on all logical channels */
2435 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2436 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2437 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2438 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2439 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2440 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2441 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2442 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2443 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2444 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2445 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2446 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2447 };
2448 int i;
2449 u32 prmseo[2] = {0, 0};
2450 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2451 u32 pcmis = 0;
2452 u32 pcicr = 0;
2453
2454 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2455 writel(dma_init_reg[i].val,
2456 base->virtbase + dma_init_reg[i].reg);
2457
2458 /* Configure all our dma channels to default settings */
2459 for (i = 0; i < base->num_phy_chans; i++) {
2460
2461 activeo[i % 2] = activeo[i % 2] << 2;
2462
2463 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2464 == D40_ALLOC_PHY) {
2465 activeo[i % 2] |= 3;
2466 continue;
2467 }
2468
2469 /* Enable interrupt # */
2470 pcmis = (pcmis << 1) | 1;
2471
2472 /* Clear interrupt # */
2473 pcicr = (pcicr << 1) | 1;
2474
2475 /* Set channel to physical mode */
2476 prmseo[i % 2] = prmseo[i % 2] << 2;
2477 prmseo[i % 2] |= 1;
2478
2479 }
2480
2481 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2482 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2483 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2484 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2485
2486 /* Write which interrupt to enable */
2487 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2488
2489 /* Write which interrupt to clear */
2490 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2491
2492}
2493
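/*
 * Platform probe: detect the hardware, claim and map the LCPA and LCLA
 * memory areas, hook up the interrupt, register the dmaengine devices and
 * finally bring up the hardware. All resources are released on failure.
 */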
2494static int __init d40_probe(struct platform_device *pdev)
2495{
2496 int err;
2497 int ret = -ENOENT;
2498 struct d40_base *base;
2499 struct resource *res = NULL;
2500 int num_reserved_chans;
2501 u32 val;
2502
2503 base = d40_hw_detect_init(pdev);
2504
2505 if (!base)
2506 goto failure;
2507
2508 num_reserved_chans = d40_phy_res_init(base);
2509
2510 platform_set_drvdata(pdev, base);
2511
2512 spin_lock_init(&base->interrupt_lock);
2513 spin_lock_init(&base->execmd_lock);
2514
2515 /* Get IO for logical channel parameter address */
2516 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2517 if (!res) {
2518 ret = -ENOENT;
2519 dev_err(&pdev->dev,
2520 "[%s] No \"lcpa\" memory resource\n",
2521 __func__);
2522 goto failure;
2523 }
2524 base->lcpa_size = resource_size(res);
2525 base->phy_lcpa = res->start;
2526
2527 if (request_mem_region(res->start, resource_size(res),
2528 D40_NAME " I/O lcpa") == NULL) {
2529 ret = -EBUSY;
2530 dev_err(&pdev->dev,
2531 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2532 __func__, res->start, res->end);
2533 goto failure;
2534 }
2535
2536 /* We make use of ESRAM memory for this. */
2537 val = readl(base->virtbase + D40_DREG_LCPA);
2538 if (res->start != val && val != 0) {
2539 dev_warn(&pdev->dev,
2540 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2541 __func__, val, res->start);
2542 } else
2543 writel(res->start, base->virtbase + D40_DREG_LCPA);
2544
2545 base->lcpa_base = ioremap(res->start, resource_size(res));
2546 if (!base->lcpa_base) {
2547 ret = -ENOMEM;
2548 dev_err(&pdev->dev,
2549 "[%s] Failed to ioremap LCPA region\n",
2550 __func__);
2551 goto failure;
2552 }
2553 /* Get IO for logical channel link address */
2554 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2555 if (!res) {
2556 ret = -ENOENT;
2557 dev_err(&pdev->dev,
2558 "[%s] No \"lcla\" resource defined\n",
2559 __func__);
2560 goto failure;
2561 }
2562
2563 base->lcla_pool.base_size = resource_size(res);
2564 base->lcla_pool.phy = res->start;
2565
2566 if (request_mem_region(res->start, resource_size(res),
2567 D40_NAME " I/O lcla") == NULL) {
2568 ret = -EBUSY;
2569 dev_err(&pdev->dev,
2570 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2571 __func__, res->start, res->end);
2572 goto failure;
2573 }
2574 val = readl(base->virtbase + D40_DREG_LCLA);
2575 if (res->start != val && val != 0) {
2576 dev_warn(&pdev->dev,
2577 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2578 __func__, val, res->start);
2579 } else
2580 writel(res->start, base->virtbase + D40_DREG_LCLA);
2581
2582 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2583 if (!base->lcla_pool.base) {
2584 ret = -ENOMEM;
2585 dev_err(&pdev->dev,
2586 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2587 __func__, res->start, res->end);
2588 goto failure;
2589 }
2590
2591 spin_lock_init(&base->lcla_pool.lock);
2592
2593 base->lcla_pool.num_blocks = base->num_phy_chans;
2594
2595 base->irq = platform_get_irq(pdev, 0);
2596
2597 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2598
2599 if (ret) {
2600 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2601 goto failure;
2602 }
2603
2604 err = d40_dmaengine_init(base, num_reserved_chans);
2605 if (err)
2606 goto failure;
2607
2608 d40_hw_init(base);
2609
2610 dev_info(base->dev, "initialized\n");
2611 return 0;
2612
2613failure:
2614 if (base) {
2615 if (base->virtbase)
2616 iounmap(base->virtbase);
2617 if (base->lcla_pool.phy)
2618 release_mem_region(base->lcla_pool.phy,
2619 base->lcla_pool.base_size);
2620 if (base->phy_lcpa)
2621 release_mem_region(base->phy_lcpa,
2622 base->lcpa_size);
2623 if (base->phy_start)
2624 release_mem_region(base->phy_start,
2625 base->phy_size);
2626 if (base->clk) {
2627 clk_disable(base->clk);
2628 clk_put(base->clk);
2629 }
2630
2631 kfree(base->lcla_pool.alloc_map);
2632 kfree(base->lookup_log_chans);
2633 kfree(base->lookup_phy_chans);
2634 kfree(base->phy_res);
2635 kfree(base);
2636 }
2637
2638 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2639 return ret;
2640}
2641
2642static struct platform_driver d40_driver = {
2643 .driver = {
2644 .owner = THIS_MODULE,
2645 .name = D40_NAME,
2646 },
2647};
2648
2649int __init stedma40_init(void)
2650{
2651 return platform_driver_probe(&d40_driver, d40_probe);
2652}
2653arch_initcall(stedma40_init);