/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#include "apbio.h"

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

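/*
 * Of the 16 APB DMA channels, the top TEGRA_SYSTEM_DMA_AVP_CH_NUM are left
 * to the AVP coprocessor; this driver only manages channels
 * TEGRA_SYSTEM_DMA_CH_MIN..TEGRA_SYSTEM_DMA_CH_MAX.
 */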
#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX		\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000

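/*
 * Address wrap sizes, expressed in 32-bit words; the index of the matching
 * entry is what gets programmed into the WRAP field of the AHB/APB
 * sequencer registers (see tegra_dma_update_hw()).
 */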
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {
	0, 1, 2, 4, 8, 16, 32, 64
};

static const unsigned int bus_width_table[5] = {
	8, 16, 32, 64, 128
};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	int			req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);
static DEFINE_SPINLOCK(enable_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}

static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

static int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

static unsigned int get_channel_status(struct tegra_dma_channel *ch,
			struct tegra_dma_req *req, bool is_stop_dma)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	unsigned int status;

	if (is_stop_dma) {
		/*
		 * STOP the DMA and get the transfer count.
		 * Getting the transfer count is tricky.
		 *  - Globally disable DMA on all channels
		 *  - Read the channel's status register to know the number
		 *    of pending bytes to be transferred.
		 *  - Stop the DMA channel
		 *  - Globally re-enable DMA to resume other transfers
		 */
		spin_lock(&enable_lock);
		writel(0, addr + APB_DMA_GEN);
		udelay(20);
		status = readl(ch->addr + APB_DMA_CHAN_STA);
		tegra_dma_stop(ch);
		writel(GEN_ENABLE, addr + APB_DMA_GEN);
		spin_unlock(&enable_lock);
		if (status & STA_ISE_EOC) {
			pr_err("Got DMA interrupt here, clearing\n");
			writel(status, ch->addr + APB_DMA_CHAN_STA);
		}
		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
	} else {
		status = readl(ch->addr + APB_DMA_CHAN_STA);
	}
	return status;
}

/* should be called with the channel lock held */
static unsigned int dma_active_count(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, unsigned int status)
{
	unsigned int to_transfer;
	unsigned int req_transfer_count;
	unsigned int bytes_transferred;

	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
	req_transfer_count = ch->req_transfer_count + 1;
	bytes_transferred = req_transfer_count;
	if (status & STA_BUSY)
		bytes_transferred -= to_transfer;
	/*
	 * In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			bytes_transferred += req_transfer_count;
		if (status & STA_ISE_EOC)
			bytes_transferred += req_transfer_count;
	}
	bytes_transferred *= 4;
	return bytes_transferred;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int stop = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
		stop = 1;

	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	if (!stop)
		goto skip_stop_dma;

	status = get_channel_status(ch, req, true);
	req->bytes_transferred = dma_active_count(ch, req, status);

	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}

skip_stop_dma:
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	if (!tegra_dma_initialized)
		return NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

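/*
 * Switch the channel to the next buffer of an in-flight continuous
 * transfer: only the APB/AHB pointer registers are reprogrammed, the rest
 * of the channel setup is left untouched. Called with ch->lock held.
 */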
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}

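/*
 * Fully program the channel (CSR, sequencer and pointer registers) for a
 * new request and enable the transfer. The WCOUNT field is the per-buffer
 * length in 32-bit words minus one; continuous mode is double buffered, so
 * only half the request size is programmed per buffer. Called with
 * ch->lock held.
 */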
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

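/*
 * Completion handling for one-shot transfers: the finished request is
 * removed from the queue, its callback runs with the channel lock dropped,
 * and the next queued request (if any) is started unless the callback
 * already put it in flight.
 */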
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

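/*
 * Completion handling for continuous (double-buffered) transfers: the
 * STA_PING_PONG bit indicates which half of the double buffer is active,
 * so an interrupt normally means "half full" the first time and "full" the
 * second time; if hardware and driver get out of sync the current request
 * is completed early and the next one is started.
 */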
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}
			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full
			 * (i.e. on the second interrupt) */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	/* map the controller up front: the fail path touches APB_DMA_GEN */
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	struct clk *c;

	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);

	c = clk_get_sys("tegra-dma", NULL);
	if (IS_ERR(c)) {
		pr_err("Unable to get clock for APB DMA\n");
		ret = PTR_ERR(c);
		goto fail;
	}
	ret = clk_enable(c);
	if (ret != 0) {
		pr_err("Unable to enable clock for APB DMA\n");
		goto fail;
	}

	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
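	/* enable interrupts only for the CPU-owned channels (0..CH_MAX) */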
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;

		__clear_bit(i, channel_usage);
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	tegra_dma_initialized = true;

	return 0;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
postcore_initcall(tegra_dma_init);

#ifdef CONFIG_PM
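/* context save area: 3 controller-wide registers plus 5 per channel */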
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif