blob: 122e46770d9354be19b8a79ff90341b0df5ad77c [file] [log] [blame]
Colin Cross4de3a8f2010-04-05 13:16:42 -07001/*
2 * arch/arm/mach-tegra/dma.c
3 *
4 * System DMA driver for NVIDIA Tegra SoCs
5 *
6 * Copyright (c) 2008-2009, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/io.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/spinlock.h>
27#include <linux/err.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
Stephen Warren1ca00342011-01-05 14:32:20 -070030#include <linux/clk.h>
Colin Cross4de3a8f2010-04-05 13:16:42 -070031#include <mach/dma.h>
32#include <mach/irqs.h>
33#include <mach/iomap.h>
Colin Cross2ea67fd2010-10-04 08:49:49 -070034#include <mach/suspend.h>
Colin Cross4de3a8f2010-04-05 13:16:42 -070035
Olof Johanssone2f91572011-10-12 23:52:29 -070036#include "apbio.h"
37
Colin Cross4de3a8f2010-04-05 13:16:42 -070038#define APB_DMA_GEN 0x000
39#define GEN_ENABLE (1<<31)
40
41#define APB_DMA_CNTRL 0x010
42
43#define APB_DMA_IRQ_MASK 0x01c
44
45#define APB_DMA_IRQ_MASK_SET 0x020
46
47#define APB_DMA_CHAN_CSR 0x000
48#define CSR_ENB (1<<31)
49#define CSR_IE_EOC (1<<30)
50#define CSR_HOLD (1<<29)
51#define CSR_DIR (1<<28)
52#define CSR_ONCE (1<<27)
53#define CSR_FLOW (1<<21)
54#define CSR_REQ_SEL_SHIFT 16
55#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
56#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
57#define CSR_WCOUNT_SHIFT 2
58#define CSR_WCOUNT_MASK 0xFFFC
59
60#define APB_DMA_CHAN_STA 0x004
61#define STA_BUSY (1<<31)
62#define STA_ISE_EOC (1<<30)
63#define STA_HALT (1<<29)
64#define STA_PING_PONG (1<<28)
65#define STA_COUNT_SHIFT 2
66#define STA_COUNT_MASK 0xFFFC
67
68#define APB_DMA_CHAN_AHB_PTR 0x010
69
70#define APB_DMA_CHAN_AHB_SEQ 0x014
71#define AHB_SEQ_INTR_ENB (1<<31)
72#define AHB_SEQ_BUS_WIDTH_SHIFT 28
73#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
74#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
75#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
76#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
77#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
78#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
79#define AHB_SEQ_DATA_SWAP (1<<27)
80#define AHB_SEQ_BURST_MASK (0x7<<24)
81#define AHB_SEQ_BURST_1 (4<<24)
82#define AHB_SEQ_BURST_4 (5<<24)
83#define AHB_SEQ_BURST_8 (6<<24)
84#define AHB_SEQ_DBL_BUF (1<<19)
85#define AHB_SEQ_WRAP_SHIFT 16
86#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)
87
88#define APB_DMA_CHAN_APB_PTR 0x018
89
90#define APB_DMA_CHAN_APB_SEQ 0x01c
91#define APB_SEQ_BUS_WIDTH_SHIFT 28
92#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
93#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
94#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
95#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
96#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
97#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
98#define APB_SEQ_DATA_SWAP (1<<27)
99#define APB_SEQ_WRAP_SHIFT 16
100#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
101
102#define TEGRA_SYSTEM_DMA_CH_NR 16
103#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
104#define TEGRA_SYSTEM_DMA_CH_MIN 0
105#define TEGRA_SYSTEM_DMA_CH_MAX \
106 (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
107
108#define NV_DMA_MAX_TRASFER_SIZE 0x10000
109
/* Legal AHB address-wrap sizes, in 32-bit words (callers shift the
 * byte value right by 2 before lookup); the table index is the value
 * programmed into the AHB_SEQ wrap field. */
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

/* Legal APB address-wrap sizes, in 32-bit words; index maps to the
 * APB_SEQ wrap field. */
static const unsigned int apb_addr_wrap_table[8] = {
	0, 1, 2, 4, 8, 16, 32, 64
};

/* Supported bus widths in bits; index maps to the *_SEQ_BUS_WIDTH field. */
static const unsigned int bus_width_table[5] = {
	8, 16, 32, 64, 128
};
Colin Cross4de3a8f2010-04-05 13:16:42 -0700121
#define TEGRA_DMA_NAME_SIZE 16
/* Per-channel driver state; one instance per hardware APB DMA channel. */
struct tegra_dma_channel {
	struct list_head list;		/* queue of pending tegra_dma_req */
	int id;				/* hardware channel index */
	spinlock_t lock;		/* protects list and request state */
	char name[TEGRA_DMA_NAME_SIZE];	/* IRQ/debug name, "dma_channel_%d" */
	void __iomem *addr;		/* channel register window base */
	int mode;			/* TEGRA_DMA_MODE_* / TEGRA_DMA_SHARED */
	int irq;			/* assigned interrupt, 0 if none */
	int req_transfer_count;		/* programmed WCOUNT (words - 1) */
};
133
134#define NV_DMA_MAX_CHANNELS 32
135
/* Set once tegra_dma_init() completes; allocation refuses earlier calls. */
static bool tegra_dma_initialized;
/* Serialises updates to channel_usage during allocate/free. */
static DEFINE_MUTEX(tegra_dma_lock);

/* Bit set = channel in use (or not present); cleared as channels probe. */
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
141
142static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
143 struct tegra_dma_req *req);
144static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
145 struct tegra_dma_req *req);
Colin Cross4de3a8f2010-04-05 13:16:42 -0700146static void tegra_dma_stop(struct tegra_dma_channel *ch);
147
/*
 * tegra_dma_flush - intentionally a no-op; exported so callers have a
 * stable API point for flushing a channel.
 */
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
152
153void tegra_dma_dequeue(struct tegra_dma_channel *ch)
154{
155 struct tegra_dma_req *req;
156
Colin Cross5789fee2010-08-18 00:19:12 -0700157 if (tegra_dma_is_empty(ch))
158 return;
159
Colin Cross4de3a8f2010-04-05 13:16:42 -0700160 req = list_entry(ch->list.next, typeof(*req), node);
161
162 tegra_dma_dequeue_req(ch, req);
163 return;
164}
165
/*
 * Halt a channel: mask its end-of-conversion interrupt, disable the
 * channel, then acknowledge any interrupt that was already latched.
 * The two-step CSR write order matters — masking IE_EOC first avoids
 * a late IRQ firing while the channel is being disabled.
 */
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* STA_ISE_EOC is write-1-to-clear. */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}
182
/*
 * Drop every queued request on the channel (completion callbacks are
 * NOT run), park the request selector on an invalid source so the
 * FIFO-to-memory flow halts, then stop the channel.  Always returns 0.
 */
static int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
202
/*
 * Remove a specific request from a channel's queue.
 *
 * If found, the hardware is parked on an invalid request selector, the
 * words already transferred are recovered from the status register, the
 * channel is stopped and the next queued request (if any) is started.
 * The request's ->complete() callback is invoked, outside the channel
 * lock, with status -TEGRA_DMA_REQ_ERROR_ABORTED.
 *
 * Returns 0 whether or not the request was on the queue.
 */
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 * - Change the source selector to invalid to stop the DMA from
	 *   FIFO to memory.
	 * - Read the status register to know the number of pending
	 *   bytes to be transferred.
	 * - Finally stop or program the DMA to the next buffer in the
	 *   list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count; both counts are "minus one" encoded. */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	/* Still busy: subtract the words not yet moved. */
	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 *
	 * FIXME: There can be a race here. What if the req to
	 * dequeue happens at the same time as the DMA just moved to
	 * the new buffer and SW didn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	/* Counts above are 32-bit words; convert to bytes. */
	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
284
285bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
286{
287 unsigned long irq_flags;
288 bool is_empty;
289
290 spin_lock_irqsave(&ch->lock, irq_flags);
291 if (list_empty(&ch->list))
292 is_empty = true;
293 else
294 is_empty = false;
295 spin_unlock_irqrestore(&ch->lock, irq_flags);
296 return is_empty;
297}
298EXPORT_SYMBOL(tegra_dma_is_empty);
299
300bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
301 struct tegra_dma_req *_req)
302{
303 unsigned long irq_flags;
304 struct tegra_dma_req *req;
305
306 spin_lock_irqsave(&ch->lock, irq_flags);
307 list_for_each_entry(req, &ch->list, node) {
308 if (req == _req) {
309 spin_unlock_irqrestore(&ch->lock, irq_flags);
310 return true;
311 }
312 }
313 spin_unlock_irqrestore(&ch->lock, irq_flags);
314 return false;
315}
316EXPORT_SYMBOL(tegra_dma_is_req_inflight);
317
/*
 * Queue a request on a channel; start the hardware immediately if the
 * queue was empty.
 *
 * Rejects transfers larger than NV_DMA_MAX_TRASFER_SIZE or with
 * non-word-aligned source/destination addresses (-EINVAL), and
 * requests already present on the queue (-EEXIST).  Returns 0 on
 * success.
 */
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	/* Refuse to double-queue the same request object. */
	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
356
/*
 * Allocate a DMA channel.
 *
 * TEGRA_DMA_SHARED callers all receive the reserved shared channel
 * (TEGRA_SYSTEM_DMA_CH_MIN); others claim the first free channel from
 * the usage bitmap.  Returns NULL before the driver has initialised
 * or when no channel is free.
 */
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	if (!tegra_dma_initialized)
		return NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
385
/*
 * Release a channel from tegra_dma_allocate_channel().  The shared
 * channel is never freed (other users may still hold it); exclusive
 * channels have pending requests cancelled before the usage bit is
 * cleared.
 */
void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
396
397static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
398 struct tegra_dma_req *req)
399{
Colin Cross5789fee2010-08-18 00:19:12 -0700400 u32 apb_ptr;
401 u32 ahb_ptr;
402
Colin Cross4de3a8f2010-04-05 13:16:42 -0700403 if (req->to_memory) {
Colin Cross5789fee2010-08-18 00:19:12 -0700404 apb_ptr = req->source_addr;
405 ahb_ptr = req->dest_addr;
Colin Cross4de3a8f2010-04-05 13:16:42 -0700406 } else {
Colin Cross5789fee2010-08-18 00:19:12 -0700407 apb_ptr = req->dest_addr;
408 ahb_ptr = req->source_addr;
Colin Cross4de3a8f2010-04-05 13:16:42 -0700409 }
Colin Cross5789fee2010-08-18 00:19:12 -0700410 writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
411 writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
Colin Cross4de3a8f2010-04-05 13:16:42 -0700412
413 req->status = TEGRA_DMA_REQ_INFLIGHT;
414 return;
415}
416
/*
 * Fully program the channel registers for @req and start the transfer.
 *
 * One-shot mode runs single-buffered with CSR_ONCE; continuous mode
 * runs double-buffered and interrupts at each half buffer, so the
 * programmed word count is half of req->size.  The wrap and bus-width
 * lookup tables translate request parameters into register field
 * encodings; a value absent from a table is a driver bug (BUG_ON).
 */
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 * */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		/* WCOUNT is encoded as words minus one. */
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		/* memory-to-device: set the direction bit, swap roles */
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	/* Request wrap sizes are in bytes; the tables are in words. */
	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	/* Program all registers before setting the enable bit. */
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
523
/*
 * Completion handling for a one-shot channel (threaded IRQ context).
 *
 * The finished request at the head of the queue is removed, its byte
 * count filled in (req_transfer_count is words minus one, hence +1
 * then <<2), and ->complete() is run with the channel lock dropped.
 * If the callback did not already start a new transfer, the next
 * queued request is programmed.
 */
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
564
/*
 * Completion handling for a continuous (double-buffered) channel.
 *
 * Interrupts arrive at each half-buffer boundary.  The first interrupt
 * moves the head request from EMPTY to HALF_FULL and calls its
 * ->threshold() hook; the second marks it FULL, removes it and calls
 * ->complete().  If the hardware's ping/pong status does not match the
 * half we expected (software fell behind), the request is retired
 * immediately and the next one is started from scratch.  Byte counts:
 * req_transfer_count is half-buffer words minus one, hence (+1) << 3
 * for the full buffer in bytes.
 */
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			/* For memory-bound transfers the bit sense inverts. */
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}
			/* Load the next request into the hardware, if available
			 * */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e on
			 * the second interrupt */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			/* A FULL request still at the head is a driver bug. */
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
654
655static irqreturn_t dma_isr(int irq, void *data)
656{
657 struct tegra_dma_channel *ch = data;
658 unsigned long status;
659
660 status = readl(ch->addr + APB_DMA_CHAN_STA);
661 if (status & STA_ISE_EOC)
662 writel(status, ch->addr + APB_DMA_CHAN_STA);
663 else {
664 pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
665 return IRQ_HANDLED;
666 }
667 return IRQ_WAKE_THREAD;
668}
669
670static irqreturn_t dma_thread_fn(int irq, void *data)
671{
672 struct tegra_dma_channel *ch = data;
673
674 if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
675 handle_oneshot_dma(ch);
676 else
677 handle_continuous_dma(ch);
678
679
680 return IRQ_HANDLED;
681}
682
683int __init tegra_dma_init(void)
684{
685 int ret = 0;
686 int i;
687 unsigned int irq;
688 void __iomem *addr;
Stephen Warren1ca00342011-01-05 14:32:20 -0700689 struct clk *c;
690
Stephen Warrenccac0512011-02-23 14:49:30 -0700691 bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
692
Stephen Warren1ca00342011-01-05 14:32:20 -0700693 c = clk_get_sys("tegra-dma", NULL);
694 if (IS_ERR(c)) {
695 pr_err("Unable to get clock for APB DMA\n");
696 ret = PTR_ERR(c);
697 goto fail;
698 }
699 ret = clk_enable(c);
700 if (ret != 0) {
701 pr_err("Unable to enable clock for APB DMA\n");
702 goto fail;
703 }
Colin Cross4de3a8f2010-04-05 13:16:42 -0700704
705 addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
706 writel(GEN_ENABLE, addr + APB_DMA_GEN);
707 writel(0, addr + APB_DMA_CNTRL);
708 writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
709 addr + APB_DMA_IRQ_MASK_SET);
710
Colin Cross4de3a8f2010-04-05 13:16:42 -0700711 for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
712 struct tegra_dma_channel *ch = &dma_channels[i];
713
Colin Cross4de3a8f2010-04-05 13:16:42 -0700714 ch->id = i;
715 snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
716
717 ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
718 TEGRA_APB_DMA_CH0_SIZE * i);
719
720 spin_lock_init(&ch->lock);
721 INIT_LIST_HEAD(&ch->list);
Colin Cross4de3a8f2010-04-05 13:16:42 -0700722
723 irq = INT_APB_DMA_CH0 + i;
724 ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
725 dma_channels[i].name, ch);
726 if (ret) {
727 pr_err("Failed to register IRQ %d for DMA %d\n",
728 irq, i);
729 goto fail;
730 }
731 ch->irq = irq;
Stephen Warrenccac0512011-02-23 14:49:30 -0700732
733 __clear_bit(i, channel_usage);
Colin Cross4de3a8f2010-04-05 13:16:42 -0700734 }
735 /* mark the shared channel allocated */
736 __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
737
Stephen Warrenccac0512011-02-23 14:49:30 -0700738 tegra_dma_initialized = true;
Colin Cross4de3a8f2010-04-05 13:16:42 -0700739
Stephen Warrenccac0512011-02-23 14:49:30 -0700740 return 0;
Colin Cross4de3a8f2010-04-05 13:16:42 -0700741fail:
742 writel(0, addr + APB_DMA_GEN);
743 for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
744 struct tegra_dma_channel *ch = &dma_channels[i];
745 if (ch->irq)
746 free_irq(ch->irq, ch);
747 }
748 return ret;
749}
Stephen Warrendc54c232011-02-23 10:41:29 -0700750postcore_initcall(tegra_dma_init);
Colin Cross4de3a8f2010-04-05 13:16:42 -0700751
752#ifdef CONFIG_PM
/* Suspend/resume register context: 3 controller-global registers plus
 * 5 per-channel registers for each of the 16 channels. */
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
754
755void tegra_dma_suspend(void)
756{
757 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
758 u32 *ctx = apb_dma;
759 int i;
760
761 *ctx++ = readl(addr + APB_DMA_GEN);
762 *ctx++ = readl(addr + APB_DMA_CNTRL);
763 *ctx++ = readl(addr + APB_DMA_IRQ_MASK);
764
765 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
766 addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
767 TEGRA_APB_DMA_CH0_SIZE * i);
768
769 *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
770 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
771 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
772 *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
773 *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
774 }
775}
776
777void tegra_dma_resume(void)
778{
779 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
780 u32 *ctx = apb_dma;
781 int i;
782
783 writel(*ctx++, addr + APB_DMA_GEN);
784 writel(*ctx++, addr + APB_DMA_CNTRL);
785 writel(*ctx++, addr + APB_DMA_IRQ_MASK);
786
787 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
788 addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
789 TEGRA_APB_DMA_CH0_SIZE * i);
790
791 writel(*ctx++, addr + APB_DMA_CHAN_CSR);
792 writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
793 writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
794 writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
795 writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
796 }
797}
798
799#endif