| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * arch/arm/mach-tegra/dma.c | 
 | 3 |  * | 
 | 4 |  * System DMA driver for NVIDIA Tegra SoCs | 
 | 5 |  * | 
 | 6 |  * Copyright (c) 2008-2009, NVIDIA Corporation. | 
 | 7 |  * | 
 | 8 |  * This program is free software; you can redistribute it and/or modify | 
 | 9 |  * it under the terms of the GNU General Public License as published by | 
 | 10 |  * the Free Software Foundation; either version 2 of the License, or | 
 | 11 |  * (at your option) any later version. | 
 | 12 |  * | 
 | 13 |  * This program is distributed in the hope that it will be useful, but WITHOUT | 
 | 14 |  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
 | 15 |  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for | 
 | 16 |  * more details. | 
 | 17 |  * | 
 | 18 |  * You should have received a copy of the GNU General Public License along | 
 | 19 |  * with this program; if not, write to the Free Software Foundation, Inc., | 
 | 20 |  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. | 
 | 21 |  */ | 
 | 22 |  | 
 | 23 | #include <linux/io.h> | 
 | 24 | #include <linux/interrupt.h> | 
 | 25 | #include <linux/module.h> | 
 | 26 | #include <linux/spinlock.h> | 
 | 27 | #include <linux/err.h> | 
 | 28 | #include <linux/irq.h> | 
 | 29 | #include <linux/delay.h> | 
| Stephen Warren | 1ca0034 | 2011-01-05 14:32:20 -0700 | [diff] [blame] | 30 | #include <linux/clk.h> | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 31 | #include <mach/dma.h> | 
 | 32 | #include <mach/irqs.h> | 
 | 33 | #include <mach/iomap.h> | 
| Colin Cross | 2ea67fd | 2010-10-04 08:49:49 -0700 | [diff] [blame] | 34 | #include <mach/suspend.h> | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 35 |  | 
 | 36 | #define APB_DMA_GEN				0x000 | 
 | 37 | #define GEN_ENABLE				(1<<31) | 
 | 38 |  | 
 | 39 | #define APB_DMA_CNTRL				0x010 | 
 | 40 |  | 
 | 41 | #define APB_DMA_IRQ_MASK			0x01c | 
 | 42 |  | 
 | 43 | #define APB_DMA_IRQ_MASK_SET			0x020 | 
 | 44 |  | 
 | 45 | #define APB_DMA_CHAN_CSR			0x000 | 
 | 46 | #define CSR_ENB					(1<<31) | 
 | 47 | #define CSR_IE_EOC				(1<<30) | 
 | 48 | #define CSR_HOLD				(1<<29) | 
 | 49 | #define CSR_DIR					(1<<28) | 
 | 50 | #define CSR_ONCE				(1<<27) | 
 | 51 | #define CSR_FLOW				(1<<21) | 
 | 52 | #define CSR_REQ_SEL_SHIFT			16 | 
 | 53 | #define CSR_REQ_SEL_MASK			(0x1F<<CSR_REQ_SEL_SHIFT) | 
 | 54 | #define CSR_REQ_SEL_INVALID			(31<<CSR_REQ_SEL_SHIFT) | 
 | 55 | #define CSR_WCOUNT_SHIFT			2 | 
 | 56 | #define CSR_WCOUNT_MASK				0xFFFC | 
 | 57 |  | 
 | 58 | #define APB_DMA_CHAN_STA				0x004 | 
 | 59 | #define STA_BUSY				(1<<31) | 
 | 60 | #define STA_ISE_EOC				(1<<30) | 
 | 61 | #define STA_HALT				(1<<29) | 
 | 62 | #define STA_PING_PONG				(1<<28) | 
 | 63 | #define STA_COUNT_SHIFT				2 | 
 | 64 | #define STA_COUNT_MASK				0xFFFC | 
 | 65 |  | 
 | 66 | #define APB_DMA_CHAN_AHB_PTR				0x010 | 
 | 67 |  | 
 | 68 | #define APB_DMA_CHAN_AHB_SEQ				0x014 | 
 | 69 | #define AHB_SEQ_INTR_ENB			(1<<31) | 
 | 70 | #define AHB_SEQ_BUS_WIDTH_SHIFT			28 | 
 | 71 | #define AHB_SEQ_BUS_WIDTH_MASK			(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT) | 
 | 72 | #define AHB_SEQ_BUS_WIDTH_8			(0<<AHB_SEQ_BUS_WIDTH_SHIFT) | 
 | 73 | #define AHB_SEQ_BUS_WIDTH_16			(1<<AHB_SEQ_BUS_WIDTH_SHIFT) | 
 | 74 | #define AHB_SEQ_BUS_WIDTH_32			(2<<AHB_SEQ_BUS_WIDTH_SHIFT) | 
 | 75 | #define AHB_SEQ_BUS_WIDTH_64			(3<<AHB_SEQ_BUS_WIDTH_SHIFT) | 
 | 76 | #define AHB_SEQ_BUS_WIDTH_128			(4<<AHB_SEQ_BUS_WIDTH_SHIFT) | 
 | 77 | #define AHB_SEQ_DATA_SWAP			(1<<27) | 
 | 78 | #define AHB_SEQ_BURST_MASK			(0x7<<24) | 
 | 79 | #define AHB_SEQ_BURST_1				(4<<24) | 
 | 80 | #define AHB_SEQ_BURST_4				(5<<24) | 
 | 81 | #define AHB_SEQ_BURST_8				(6<<24) | 
 | 82 | #define AHB_SEQ_DBL_BUF				(1<<19) | 
 | 83 | #define AHB_SEQ_WRAP_SHIFT			16 | 
 | 84 | #define AHB_SEQ_WRAP_MASK			(0x7<<AHB_SEQ_WRAP_SHIFT) | 
 | 85 |  | 
 | 86 | #define APB_DMA_CHAN_APB_PTR				0x018 | 
 | 87 |  | 
 | 88 | #define APB_DMA_CHAN_APB_SEQ				0x01c | 
 | 89 | #define APB_SEQ_BUS_WIDTH_SHIFT			28 | 
 | 90 | #define APB_SEQ_BUS_WIDTH_MASK			(0x7<<APB_SEQ_BUS_WIDTH_SHIFT) | 
 | 91 | #define APB_SEQ_BUS_WIDTH_8			(0<<APB_SEQ_BUS_WIDTH_SHIFT) | 
 | 92 | #define APB_SEQ_BUS_WIDTH_16			(1<<APB_SEQ_BUS_WIDTH_SHIFT) | 
 | 93 | #define APB_SEQ_BUS_WIDTH_32			(2<<APB_SEQ_BUS_WIDTH_SHIFT) | 
 | 94 | #define APB_SEQ_BUS_WIDTH_64			(3<<APB_SEQ_BUS_WIDTH_SHIFT) | 
 | 95 | #define APB_SEQ_BUS_WIDTH_128			(4<<APB_SEQ_BUS_WIDTH_SHIFT) | 
 | 96 | #define APB_SEQ_DATA_SWAP			(1<<27) | 
 | 97 | #define APB_SEQ_WRAP_SHIFT			16 | 
 | 98 | #define APB_SEQ_WRAP_MASK			(0x7<<APB_SEQ_WRAP_SHIFT) | 
 | 99 |  | 
 | 100 | #define TEGRA_SYSTEM_DMA_CH_NR			16 | 
 | 101 | #define TEGRA_SYSTEM_DMA_AVP_CH_NUM		4 | 
 | 102 | #define TEGRA_SYSTEM_DMA_CH_MIN			0 | 
 | 103 | #define TEGRA_SYSTEM_DMA_CH_MAX	\ | 
 | 104 | 	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1) | 
 | 105 |  | 
 | 106 | #define NV_DMA_MAX_TRASFER_SIZE 0x10000 | 
 | 107 |  | 
 | 108 | const unsigned int ahb_addr_wrap_table[8] = { | 
 | 109 | 	0, 32, 64, 128, 256, 512, 1024, 2048 | 
 | 110 | }; | 
 | 111 |  | 
 | 112 | const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64}; | 
 | 113 |  | 
 | 114 | const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128}; | 
 | 115 |  | 
#define TEGRA_DMA_NAME_SIZE 16

/*
 * Per-channel driver state.
 */
struct tegra_dma_channel {
	struct list_head	list;	/* queue of pending tegra_dma_req; head = active */
	int			id;	/* hardware channel number */
	spinlock_t		lock;	/* protects list and channel registers */
	char			name[TEGRA_DMA_NAME_SIZE];
	void  __iomem		*addr;	/* channel register base */
	int			mode;	/* TEGRA_DMA_MODE_* / TEGRA_DMA_SHARED flags */
	int			irq;
	/* word count programmed into CSR (N-1 encoded; half the buffer
	 * in continuous/double-buffered mode) */
	int			req_transfer_count;
};
 | 127 |  | 
#define  NV_DMA_MAX_CHANNELS  32

/* Set once tegra_dma_init() completes; allocation is refused before then */
static bool tegra_dma_initialized;
/* Serializes channel allocation/free (channel_usage and ch->mode) */
static DEFINE_MUTEX(tegra_dma_lock);

/* One bit per channel: set = allocated */
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
 | 141 |  | 
/*
 * tegra_dma_flush - exported no-op.
 *
 * NOTE(review): the body is intentionally empty; presumably no explicit
 * flush is needed on this hardware, but the symbol is kept so callers
 * of the API still link. Confirm with users of <mach/dma.h>.
 */
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
 | 146 |  | 
 | 147 | void tegra_dma_dequeue(struct tegra_dma_channel *ch) | 
 | 148 | { | 
 | 149 | 	struct tegra_dma_req *req; | 
 | 150 |  | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 151 | 	if (tegra_dma_is_empty(ch)) | 
 | 152 | 		return; | 
 | 153 |  | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 154 | 	req = list_entry(ch->list.next, typeof(*req), node); | 
 | 155 |  | 
 | 156 | 	tegra_dma_dequeue_req(ch, req); | 
 | 157 | 	return; | 
 | 158 | } | 
 | 159 |  | 
/*
 * Stop a channel: mask its interrupt, disable it, and acknowledge any
 * pending end-of-conversion status. Caller holds ch->lock.
 */
void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	/* Mask the EOC interrupt first so disabling cannot race with a
	 * late interrupt from this channel. */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Then disable the channel. */
	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* STA is write-1-to-clear: ack a pending EOC if one is latched. */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}
 | 176 |  | 
/*
 * Cancel everything on a channel: empty the request queue and stop the
 * hardware. Always returns 0.
 *
 * NOTE(review): dropped requests do NOT get their complete() callback
 * invoked — callers are responsible for any per-request cleanup.
 */
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	/* Point the channel at an invalid request selector so no
	 * peripheral can trigger further transfers before the stop. */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
 | 196 |  | 
/*
 * Remove a specific request from a channel's queue. If it is the active
 * request, the hardware is stopped and req->bytes_transferred is set to
 * the number of bytes that actually moved; the next queued request (if
 * any) is then started. The request's complete() callback is invoked
 * with status -TEGRA_DMA_REQ_ERROR_ABORTED, without the channel lock.
 *
 * Returns 0 whether or not the request was found.
 */
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		/* Request not queued here; nothing to do. */
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	/* Hardware counts are N-1 encoded; convert both to real counts. */
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	/* Channel still mid-buffer: subtract the words not yet moved. */
	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 *
	 *	FIXME: There can be a race here. What if the req to
	 *	dequeue happens at the same time as the DMA just moved to
	 *	the new buffer and SW didn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	/* Word count -> byte count. */
	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
 | 278 |  | 
 | 279 | bool tegra_dma_is_empty(struct tegra_dma_channel *ch) | 
 | 280 | { | 
 | 281 | 	unsigned long irq_flags; | 
 | 282 | 	bool is_empty; | 
 | 283 |  | 
 | 284 | 	spin_lock_irqsave(&ch->lock, irq_flags); | 
 | 285 | 	if (list_empty(&ch->list)) | 
 | 286 | 		is_empty = true; | 
 | 287 | 	else | 
 | 288 | 		is_empty = false; | 
 | 289 | 	spin_unlock_irqrestore(&ch->lock, irq_flags); | 
 | 290 | 	return is_empty; | 
 | 291 | } | 
 | 292 | EXPORT_SYMBOL(tegra_dma_is_empty); | 
 | 293 |  | 
 | 294 | bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch, | 
 | 295 | 	struct tegra_dma_req *_req) | 
 | 296 | { | 
 | 297 | 	unsigned long irq_flags; | 
 | 298 | 	struct tegra_dma_req *req; | 
 | 299 |  | 
 | 300 | 	spin_lock_irqsave(&ch->lock, irq_flags); | 
 | 301 | 	list_for_each_entry(req, &ch->list, node) { | 
 | 302 | 		if (req == _req) { | 
 | 303 | 			spin_unlock_irqrestore(&ch->lock, irq_flags); | 
 | 304 | 			return true; | 
 | 305 | 		} | 
 | 306 | 	} | 
 | 307 | 	spin_unlock_irqrestore(&ch->lock, irq_flags); | 
 | 308 | 	return false; | 
 | 309 | } | 
 | 310 | EXPORT_SYMBOL(tegra_dma_is_req_inflight); | 
 | 311 |  | 
 | 312 | int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, | 
 | 313 | 	struct tegra_dma_req *req) | 
 | 314 | { | 
 | 315 | 	unsigned long irq_flags; | 
| Stephen Warren | 499ef7a | 2011-01-05 14:24:12 -0700 | [diff] [blame] | 316 | 	struct tegra_dma_req *_req; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 317 | 	int start_dma = 0; | 
 | 318 |  | 
 | 319 | 	if (req->size > NV_DMA_MAX_TRASFER_SIZE || | 
 | 320 | 		req->source_addr & 0x3 || req->dest_addr & 0x3) { | 
 | 321 | 		pr_err("Invalid DMA request for channel %d\n", ch->id); | 
 | 322 | 		return -EINVAL; | 
 | 323 | 	} | 
 | 324 |  | 
 | 325 | 	spin_lock_irqsave(&ch->lock, irq_flags); | 
 | 326 |  | 
| Stephen Warren | 499ef7a | 2011-01-05 14:24:12 -0700 | [diff] [blame] | 327 | 	list_for_each_entry(_req, &ch->list, node) { | 
 | 328 | 		if (req == _req) { | 
 | 329 | 		    spin_unlock_irqrestore(&ch->lock, irq_flags); | 
 | 330 | 		    return -EEXIST; | 
 | 331 | 		} | 
 | 332 | 	} | 
 | 333 |  | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 334 | 	req->bytes_transferred = 0; | 
 | 335 | 	req->status = 0; | 
 | 336 | 	req->buffer_status = 0; | 
 | 337 | 	if (list_empty(&ch->list)) | 
 | 338 | 		start_dma = 1; | 
 | 339 |  | 
 | 340 | 	list_add_tail(&req->node, &ch->list); | 
 | 341 |  | 
 | 342 | 	if (start_dma) | 
 | 343 | 		tegra_dma_update_hw(ch, req); | 
 | 344 |  | 
 | 345 | 	spin_unlock_irqrestore(&ch->lock, irq_flags); | 
 | 346 |  | 
 | 347 | 	return 0; | 
 | 348 | } | 
 | 349 | EXPORT_SYMBOL(tegra_dma_enqueue_req); | 
 | 350 |  | 
 | 351 | struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) | 
 | 352 | { | 
 | 353 | 	int channel; | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 354 | 	struct tegra_dma_channel *ch = NULL; | 
 | 355 |  | 
| Stephen Warren | ccac051 | 2011-02-23 14:49:30 -0700 | [diff] [blame] | 356 | 	if (WARN_ON(!tegra_dma_initialized)) | 
 | 357 | 		return NULL; | 
 | 358 |  | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 359 | 	mutex_lock(&tegra_dma_lock); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 360 |  | 
 | 361 | 	/* first channel is the shared channel */ | 
 | 362 | 	if (mode & TEGRA_DMA_SHARED) { | 
 | 363 | 		channel = TEGRA_SYSTEM_DMA_CH_MIN; | 
 | 364 | 	} else { | 
 | 365 | 		channel = find_first_zero_bit(channel_usage, | 
 | 366 | 			ARRAY_SIZE(dma_channels)); | 
 | 367 | 		if (channel >= ARRAY_SIZE(dma_channels)) | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 368 | 			goto out; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 369 | 	} | 
 | 370 | 	__set_bit(channel, channel_usage); | 
 | 371 | 	ch = &dma_channels[channel]; | 
 | 372 | 	ch->mode = mode; | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 373 |  | 
 | 374 | out: | 
 | 375 | 	mutex_unlock(&tegra_dma_lock); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 376 | 	return ch; | 
 | 377 | } | 
 | 378 | EXPORT_SYMBOL(tegra_dma_allocate_channel); | 
 | 379 |  | 
 | 380 | void tegra_dma_free_channel(struct tegra_dma_channel *ch) | 
 | 381 | { | 
 | 382 | 	if (ch->mode & TEGRA_DMA_SHARED) | 
 | 383 | 		return; | 
 | 384 | 	tegra_dma_cancel(ch); | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 385 | 	mutex_lock(&tegra_dma_lock); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 386 | 	__clear_bit(ch->id, channel_usage); | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 387 | 	mutex_unlock(&tegra_dma_lock); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 388 | } | 
 | 389 | EXPORT_SYMBOL(tegra_dma_free_channel); | 
 | 390 |  | 
 | 391 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, | 
 | 392 | 	struct tegra_dma_req *req) | 
 | 393 | { | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 394 | 	u32 apb_ptr; | 
 | 395 | 	u32 ahb_ptr; | 
 | 396 |  | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 397 | 	if (req->to_memory) { | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 398 | 		apb_ptr = req->source_addr; | 
 | 399 | 		ahb_ptr = req->dest_addr; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 400 | 	} else { | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 401 | 		apb_ptr = req->dest_addr; | 
 | 402 | 		ahb_ptr = req->source_addr; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 403 | 	} | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 404 | 	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); | 
 | 405 | 	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 406 |  | 
 | 407 | 	req->status = TEGRA_DMA_REQ_INFLIGHT; | 
 | 408 | 	return; | 
 | 409 | } | 
 | 410 |  | 
/*
 * Fully program the channel registers for @req and start the transfer.
 * Caller holds ch->lock.
 */
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 * */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		/* word count is N-1 encoded (size is in bytes) */
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	/* APB side is the peripheral FIFO; direction decides which
	 * request fields feed which bus. */
	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	/* wrap values are in bytes; the tables are in words */
	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do  {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do  {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	/* translate the AHB bus width into its register encoding */
	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	/* and the APB bus width */
	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	/* program everything, then set the enable bit last */
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
 | 517 |  | 
/*
 * Threaded-IRQ bottom half for one-shot channels: complete the request
 * at the head of the queue, then start the next one if the completion
 * callback didn't already.
 */
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		/* hardware count is N-1 encoded words; <<2 for bytes */
		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
 | 558 |  | 
 | 559 | static void handle_continuous_dma(struct tegra_dma_channel *ch) | 
 | 560 | { | 
 | 561 | 	struct tegra_dma_req *req; | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 562 | 	unsigned long irq_flags; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 563 |  | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 564 | 	spin_lock_irqsave(&ch->lock, irq_flags); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 565 | 	if (list_empty(&ch->list)) { | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 566 | 		spin_unlock_irqrestore(&ch->lock, irq_flags); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 567 | 		return; | 
 | 568 | 	} | 
 | 569 |  | 
 | 570 | 	req = list_entry(ch->list.next, typeof(*req), node); | 
 | 571 | 	if (req) { | 
 | 572 | 		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) { | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 573 | 			bool is_dma_ping_complete; | 
 | 574 | 			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA) | 
 | 575 | 						& STA_PING_PONG) ? true : false; | 
 | 576 | 			if (req->to_memory) | 
 | 577 | 				is_dma_ping_complete = !is_dma_ping_complete; | 
 | 578 | 			/* Out of sync - Release current buffer */ | 
 | 579 | 			if (!is_dma_ping_complete) { | 
 | 580 | 				int bytes_transferred; | 
 | 581 |  | 
 | 582 | 				bytes_transferred = ch->req_transfer_count; | 
 | 583 | 				bytes_transferred += 1; | 
 | 584 | 				bytes_transferred <<= 3; | 
 | 585 | 				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL; | 
 | 586 | 				req->bytes_transferred = bytes_transferred; | 
 | 587 | 				req->status = TEGRA_DMA_REQ_SUCCESS; | 
 | 588 | 				tegra_dma_stop(ch); | 
 | 589 |  | 
 | 590 | 				if (!list_is_last(&req->node, &ch->list)) { | 
 | 591 | 					struct tegra_dma_req *next_req; | 
 | 592 |  | 
 | 593 | 					next_req = list_entry(req->node.next, | 
 | 594 | 						typeof(*next_req), node); | 
 | 595 | 					tegra_dma_update_hw(ch, next_req); | 
 | 596 | 				} | 
 | 597 |  | 
 | 598 | 				list_del(&req->node); | 
 | 599 |  | 
 | 600 | 				/* DMA lock is NOT held when callbak is called */ | 
 | 601 | 				spin_unlock_irqrestore(&ch->lock, irq_flags); | 
 | 602 | 				req->complete(req); | 
 | 603 | 				return; | 
 | 604 | 			} | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 605 | 			/* Load the next request into the hardware, if available | 
 | 606 | 			 * */ | 
 | 607 | 			if (!list_is_last(&req->node, &ch->list)) { | 
 | 608 | 				struct tegra_dma_req *next_req; | 
 | 609 |  | 
 | 610 | 				next_req = list_entry(req->node.next, | 
 | 611 | 					typeof(*next_req), node); | 
 | 612 | 				tegra_dma_update_hw_partial(ch, next_req); | 
 | 613 | 			} | 
 | 614 | 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL; | 
 | 615 | 			req->status = TEGRA_DMA_REQ_SUCCESS; | 
 | 616 | 			/* DMA lock is NOT held when callback is called */ | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 617 | 			spin_unlock_irqrestore(&ch->lock, irq_flags); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 618 | 			if (likely(req->threshold)) | 
 | 619 | 				req->threshold(req); | 
 | 620 | 			return; | 
 | 621 |  | 
 | 622 | 		} else if (req->buffer_status == | 
 | 623 | 			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) { | 
 | 624 | 			/* Callback when the buffer is completely full (i.e on | 
 | 625 | 			 * the second  interrupt */ | 
 | 626 | 			int bytes_transferred; | 
 | 627 |  | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 628 | 			bytes_transferred = ch->req_transfer_count; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 629 | 			bytes_transferred += 1; | 
 | 630 | 			bytes_transferred <<= 3; | 
 | 631 |  | 
 | 632 | 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL; | 
 | 633 | 			req->bytes_transferred = bytes_transferred; | 
 | 634 | 			req->status = TEGRA_DMA_REQ_SUCCESS; | 
 | 635 | 			list_del(&req->node); | 
 | 636 |  | 
			/* DMA lock is NOT held when the callback is called */
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 638 | 			spin_unlock_irqrestore(&ch->lock, irq_flags); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 639 | 			req->complete(req); | 
 | 640 | 			return; | 
 | 641 |  | 
 | 642 | 		} else { | 
 | 643 | 			BUG(); | 
 | 644 | 		} | 
 | 645 | 	} | 
| Colin Cross | 5789fee | 2010-08-18 00:19:12 -0700 | [diff] [blame] | 646 | 	spin_unlock_irqrestore(&ch->lock, irq_flags); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 647 | } | 
 | 648 |  | 
 | 649 | static irqreturn_t dma_isr(int irq, void *data) | 
 | 650 | { | 
 | 651 | 	struct tegra_dma_channel *ch = data; | 
 | 652 | 	unsigned long status; | 
 | 653 |  | 
 | 654 | 	status = readl(ch->addr + APB_DMA_CHAN_STA); | 
 | 655 | 	if (status & STA_ISE_EOC) | 
 | 656 | 		writel(status, ch->addr + APB_DMA_CHAN_STA); | 
 | 657 | 	else { | 
 | 658 | 		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id); | 
 | 659 | 		return IRQ_HANDLED; | 
 | 660 | 	} | 
 | 661 | 	return IRQ_WAKE_THREAD; | 
 | 662 | } | 
 | 663 |  | 
 | 664 | static irqreturn_t dma_thread_fn(int irq, void *data) | 
 | 665 | { | 
 | 666 | 	struct tegra_dma_channel *ch = data; | 
 | 667 |  | 
 | 668 | 	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) | 
 | 669 | 		handle_oneshot_dma(ch); | 
 | 670 | 	else | 
 | 671 | 		handle_continuous_dma(ch); | 
 | 672 |  | 
 | 673 |  | 
 | 674 | 	return IRQ_HANDLED; | 
 | 675 | } | 
 | 676 |  | 
 | 677 | int __init tegra_dma_init(void) | 
 | 678 | { | 
 | 679 | 	int ret = 0; | 
 | 680 | 	int i; | 
 | 681 | 	unsigned int irq; | 
 | 682 | 	void __iomem *addr; | 
| Stephen Warren | 1ca0034 | 2011-01-05 14:32:20 -0700 | [diff] [blame] | 683 | 	struct clk *c; | 
 | 684 |  | 
| Stephen Warren | ccac051 | 2011-02-23 14:49:30 -0700 | [diff] [blame] | 685 | 	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS); | 
 | 686 |  | 
| Stephen Warren | 1ca0034 | 2011-01-05 14:32:20 -0700 | [diff] [blame] | 687 | 	c = clk_get_sys("tegra-dma", NULL); | 
 | 688 | 	if (IS_ERR(c)) { | 
 | 689 | 		pr_err("Unable to get clock for APB DMA\n"); | 
 | 690 | 		ret = PTR_ERR(c); | 
 | 691 | 		goto fail; | 
 | 692 | 	} | 
 | 693 | 	ret = clk_enable(c); | 
 | 694 | 	if (ret != 0) { | 
 | 695 | 		pr_err("Unable to enable clock for APB DMA\n"); | 
 | 696 | 		goto fail; | 
 | 697 | 	} | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 698 |  | 
 | 699 | 	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | 
 | 700 | 	writel(GEN_ENABLE, addr + APB_DMA_GEN); | 
 | 701 | 	writel(0, addr + APB_DMA_CNTRL); | 
 | 702 | 	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX), | 
 | 703 | 	       addr + APB_DMA_IRQ_MASK_SET); | 
 | 704 |  | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 705 | 	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { | 
 | 706 | 		struct tegra_dma_channel *ch = &dma_channels[i]; | 
 | 707 |  | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 708 | 		ch->id = i; | 
 | 709 | 		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i); | 
 | 710 |  | 
 | 711 | 		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + | 
 | 712 | 			TEGRA_APB_DMA_CH0_SIZE * i); | 
 | 713 |  | 
 | 714 | 		spin_lock_init(&ch->lock); | 
 | 715 | 		INIT_LIST_HEAD(&ch->list); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 716 |  | 
 | 717 | 		irq = INT_APB_DMA_CH0 + i; | 
 | 718 | 		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0, | 
 | 719 | 			dma_channels[i].name, ch); | 
 | 720 | 		if (ret) { | 
 | 721 | 			pr_err("Failed to register IRQ %d for DMA %d\n", | 
 | 722 | 				irq, i); | 
 | 723 | 			goto fail; | 
 | 724 | 		} | 
 | 725 | 		ch->irq = irq; | 
| Stephen Warren | ccac051 | 2011-02-23 14:49:30 -0700 | [diff] [blame] | 726 |  | 
 | 727 | 		__clear_bit(i, channel_usage); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 728 | 	} | 
 | 729 | 	/* mark the shared channel allocated */ | 
 | 730 | 	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage); | 
 | 731 |  | 
| Stephen Warren | ccac051 | 2011-02-23 14:49:30 -0700 | [diff] [blame] | 732 | 	tegra_dma_initialized = true; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 733 |  | 
| Stephen Warren | ccac051 | 2011-02-23 14:49:30 -0700 | [diff] [blame] | 734 | 	return 0; | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 735 | fail: | 
 | 736 | 	writel(0, addr + APB_DMA_GEN); | 
 | 737 | 	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { | 
 | 738 | 		struct tegra_dma_channel *ch = &dma_channels[i]; | 
 | 739 | 		if (ch->irq) | 
 | 740 | 			free_irq(ch->irq, ch); | 
 | 741 | 	} | 
 | 742 | 	return ret; | 
 | 743 | } | 
| Stephen Warren | dc54c23 | 2011-02-23 10:41:29 -0700 | [diff] [blame] | 744 | postcore_initcall(tegra_dma_init); | 
| Colin Cross | 4de3a8f | 2010-04-05 13:16:42 -0700 | [diff] [blame] | 745 |  | 
 | 746 | #ifdef CONFIG_PM | 
/* Suspend/resume register save area: three controller-global registers
 * (GEN, CNTRL, IRQ_MASK) plus five context registers per channel
 * (CSR, AHB_PTR, AHB_SEQ, APB_PTR, APB_SEQ) — hence 5*CH_NR + 3. */
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
 | 748 |  | 
 | 749 | void tegra_dma_suspend(void) | 
 | 750 | { | 
 | 751 | 	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | 
 | 752 | 	u32 *ctx = apb_dma; | 
 | 753 | 	int i; | 
 | 754 |  | 
 | 755 | 	*ctx++ = readl(addr + APB_DMA_GEN); | 
 | 756 | 	*ctx++ = readl(addr + APB_DMA_CNTRL); | 
 | 757 | 	*ctx++ = readl(addr + APB_DMA_IRQ_MASK); | 
 | 758 |  | 
 | 759 | 	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) { | 
 | 760 | 		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + | 
 | 761 | 				  TEGRA_APB_DMA_CH0_SIZE * i); | 
 | 762 |  | 
 | 763 | 		*ctx++ = readl(addr + APB_DMA_CHAN_CSR); | 
 | 764 | 		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR); | 
 | 765 | 		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ); | 
 | 766 | 		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR); | 
 | 767 | 		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ); | 
 | 768 | 	} | 
 | 769 | } | 
 | 770 |  | 
 | 771 | void tegra_dma_resume(void) | 
 | 772 | { | 
 | 773 | 	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | 
 | 774 | 	u32 *ctx = apb_dma; | 
 | 775 | 	int i; | 
 | 776 |  | 
 | 777 | 	writel(*ctx++, addr + APB_DMA_GEN); | 
 | 778 | 	writel(*ctx++, addr + APB_DMA_CNTRL); | 
 | 779 | 	writel(*ctx++, addr + APB_DMA_IRQ_MASK); | 
 | 780 |  | 
 | 781 | 	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) { | 
 | 782 | 		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + | 
 | 783 | 				  TEGRA_APB_DMA_CH0_SIZE * i); | 
 | 784 |  | 
 | 785 | 		writel(*ctx++, addr + APB_DMA_CHAN_CSR); | 
 | 786 | 		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR); | 
 | 787 | 		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ); | 
 | 788 | 		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR); | 
 | 789 | 		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ); | 
 | 790 | 	} | 
 | 791 | } | 
 | 792 |  | 
 | 793 | #endif |