/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE		0x10000

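/*
 * Lookup tables that translate an address wrap size (in words) or a bus
 * width (in bits) into the index value that gets programmed into the
 * channel's APB/AHB sequencer registers.
 */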
const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16
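/*
 * Per-channel state: the list of queued tegra_dma_req structures, the
 * channel's register window, and a shadow copy of the channel registers
 * that tegra_dma_update_hw() programs into the hardware.
 */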
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;

	/* Register shadow */
	u32			csr;
	u32			ahb_seq;
	u32			ahb_ptr;
	u32			apb_seq;
	u32			apb_ptr;
};

#define NV_DMA_MAX_CHANNELS	32

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

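/*
 * ch->lock protects both the request list and the register shadow of a
 * channel.  Request callbacks (complete/threshold) are always invoked
 * with the lock dropped.
 */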
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

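/* Dequeue the request at the head of the channel's queue. */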
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}

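/*
 * Disable the channel: clear the interrupt enable and channel enable bits
 * in CSR, then acknowledge any pending EOC status.
 */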
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned int status;

	csr = ch->csr;
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

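/*
 * Drop every queued request and stop the channel.  The request selector is
 * first pointed at an invalid peripheral so the channel stops transferring
 * data before it is disabled.
 */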
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

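/*
 * Remove a specific request from the channel's queue.  If it is the request
 * currently in flight, stop the hardware, work out how many bytes were
 * actually transferred, and restart the channel on the next queued request.
 * The request is completed with status -TEGRA_DMA_REQ_ERROR_ABORTED.
 */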
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, the DMA only tracks the count of the
	 * half DMA buffer.  So, if the DMA already finished half the buffer,
	 * add the half buffer to the completed count.
	 *
	 * FIXME: There can be a race here.  What if the request to dequeue
	 * happens at the same time as the DMA just moved to the new buffer
	 * and SW hasn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	/* convert the word count to bytes */
	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

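/*
 * Append a request to the channel's queue.  The transfer size must not
 * exceed NV_DMA_MAX_TRASFER_SIZE and both addresses must be word aligned.
 * If the channel was idle, the request is programmed into the hardware
 * immediately.
 */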
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

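/*
 * Allocate a DMA channel.  TEGRA_DMA_SHARED requests always return the
 * first (shared) channel; otherwise the first unused channel is claimed.
 */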
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch;

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			return NULL;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	__clear_bit(ch->id, channel_usage);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

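/*
 * Reprogram only the APB/AHB address pointers for the next buffer.  Used in
 * continuous (double-buffered) mode, where the rest of the channel
 * configuration stays the same.
 */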
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	if (req->to_memory) {
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;
	} else {
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;
	}
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}

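/*
 * Build the full register shadow for a request (CSR, APB/AHB sequencers and
 * pointers), write it to the channel, and enable the channel.
 */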
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;
	unsigned long csr;

	ch->csr |= CSR_FLOW;
	ch->csr &= ~CSR_REQ_SEL_MASK;
	ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
	ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
	ch->ahb_seq |= AHB_SEQ_BURST_1;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		ch->csr |= CSR_ONCE;
		ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
	} else {
		ch->csr &= ~CSR_ONCE;
		ch->ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
	}

	if (req->to_memory) {
		ch->csr &= ~CSR_DIR;
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;
	} else {
		ch->csr |= CSR_DIR;
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
	ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
	ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
	ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
	ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	ch->csr |= CSR_IE_EOC;

	/* update hw registers with the shadow */
	writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr = ch->csr | CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
{
	/* One shot with an interrupt to CPU after transfer */
	ch->csr = CSR_ONCE | CSR_IE_EOC;
	ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
	ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
}

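/*
 * Completion handling for one-shot transfers: complete the request at the
 * head of the queue and start the next one unless the completion callback
 * has already done so.
 */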
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred =
			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock(&ch->lock);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock(&ch->lock);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock(&ch->lock);
}

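/*
 * Completion handling for continuous (double-buffered) transfers: the first
 * interrupt marks the buffer half full and fires the threshold callback;
 * the second marks it full, completes the request, and fires the completion
 * callback.
 */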
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt) */
			int bytes_transferred;

			bytes_transferred =
				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock(&ch->lock);
}

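/*
 * Hard IRQ handler: acknowledge the EOC status and defer the list
 * manipulation and callbacks to the threaded handler below.
 */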
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

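/*
 * Enable the APB DMA controller, unmask interrupts for the CPU-owned
 * channels, register a threaded IRQ handler per channel, and mark the
 * shared channel and the channels reserved for the AVP as in use.
 */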
int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);
		tegra_dma_init_hw(ch);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}

#ifdef CONFIG_PM
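/*
 * Suspend/resume context: three global registers plus five registers for
 * each of the TEGRA_SYSTEM_DMA_CH_NR channels.
 */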
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif