Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1 | /* |
| 2 | * linux/arch/arm/plat-omap/dma.c |
| 3 | * |
| 4 | * Copyright (C) 2003 Nokia Corporation |
| 5 | * Author: Juha Yrjölä <juha.yrjola@nokia.com> |
| 6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> |
| 7 | * Graphics DMA and LCD DMA graphics transformations |
| 8 | * by Imre Deak <imre.deak@nokia.com> |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 9 | * OMAP2 support Copyright (C) 2004-2005 Texas Instruments, Inc. |
| 10 | * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com> |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 11 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. |
| 12 | * |
| 13 | * Support functions for the OMAP internal DMA channels. |
| 14 | * |
| 15 | * This program is free software; you can redistribute it and/or modify |
| 16 | * it under the terms of the GNU General Public License version 2 as |
| 17 | * published by the Free Software Foundation. |
| 18 | * |
| 19 | */ |
| 20 | |
| 21 | #include <linux/module.h> |
| 22 | #include <linux/init.h> |
| 23 | #include <linux/sched.h> |
| 24 | #include <linux/spinlock.h> |
| 25 | #include <linux/errno.h> |
| 26 | #include <linux/interrupt.h> |
| 27 | |
| 28 | #include <asm/system.h> |
| 29 | #include <asm/irq.h> |
| 30 | #include <asm/hardware.h> |
| 31 | #include <asm/dma.h> |
| 32 | #include <asm/io.h> |
| 33 | |
| 34 | #include <asm/arch/tc.h> |
| 35 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 36 | #define DEBUG_PRINTS |
| 37 | #undef DEBUG_PRINTS |
| 38 | #ifdef DEBUG_PRINTS |
| 39 | #define debug_printk(x) printk x |
| 40 | #else |
| 41 | #define debug_printk(x) |
| 42 | #endif |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 43 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 44 | #define OMAP_DMA_ACTIVE 0x01 |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 45 | #define OMAP_DMA_CCR_EN (1 << 7) |
| 46 | |
| 47 | #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) |
| 48 | |
| 49 | static int enable_1510_mode = 0; |
| 50 | |
| 51 | struct omap_dma_lch { |
| 52 | int next_lch; |
| 53 | int dev_id; |
| 54 | u16 saved_csr; |
| 55 | u16 enabled_irqs; |
| 56 | const char *dev_name; |
| 57 | void (* callback)(int lch, u16 ch_status, void *data); |
| 58 | void *data; |
| 59 | long flags; |
| 60 | }; |
| 61 | |
| 62 | static int dma_chan_count; |
| 63 | |
| 64 | static spinlock_t dma_chan_lock; |
| 65 | static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT]; |
| 66 | |
Jesper Juhl | 3c6bee1 | 2006-01-09 20:54:01 -0800 | [diff] [blame] | 67 | static const u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = { |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 68 | INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3, |
| 69 | INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7, |
| 70 | INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10, |
| 71 | INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13, |
| 72 | INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD |
| 73 | }; |
| 74 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 75 | #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ |
| 76 | __FUNCTION__); |
| 77 | |
| 78 | #ifdef CONFIG_ARCH_OMAP15XX |
| 79 | /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ |
| 80 | int omap_dma_in_1510_mode(void) |
| 81 | { |
| 82 | return enable_1510_mode; |
| 83 | } |
| 84 | #else |
| 85 | #define omap_dma_in_1510_mode() 0 |
| 86 | #endif |
| 87 | |
| 88 | #ifdef CONFIG_ARCH_OMAP1 |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 89 | static inline int get_gdma_dev(int req) |
| 90 | { |
| 91 | u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4; |
| 92 | int shift = ((req - 1) % 5) * 6; |
| 93 | |
| 94 | return ((omap_readl(reg) >> shift) & 0x3f) + 1; |
| 95 | } |
| 96 | |
| 97 | static inline void set_gdma_dev(int req, int dev) |
| 98 | { |
| 99 | u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4; |
| 100 | int shift = ((req - 1) % 5) * 6; |
| 101 | u32 l; |
| 102 | |
| 103 | l = omap_readl(reg); |
| 104 | l &= ~(0x3f << shift); |
| 105 | l |= (dev - 1) << shift; |
| 106 | omap_writel(l, reg); |
| 107 | } |
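/*
 * Illustrative worked example of the FUNC_MUX arithmetic above (derived only
 * from this code, not quoted from the TRM): each 32-bit register packs five
 * 6-bit device fields.  For a hypothetical request line req = 7:
 *
 *     reg   = OMAP_FUNC_MUX_ARM_BASE + ((7 - 1) / 5) * 4   ->  base + 0x4
 *     shift = ((7 - 1) % 5) * 6                            ->  6
 *
 * so set_gdma_dev(7, dev) clears bits [11:6] of that register and writes
 * (dev - 1) there, and get_gdma_dev(7) reads the same field back and adds 1.
 */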
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 108 | #else |
| 109 | #define set_gdma_dev(req, dev) do {} while (0) |
| 110 | #endif |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 111 | |
| 112 | static void clear_lch_regs(int lch) |
| 113 | { |
| 114 | int i; |
| 115 | u32 lch_base = OMAP_DMA_BASE + lch * 0x40; |
| 116 | |
| 117 | for (i = 0; i < 0x2c; i += 2) |
| 118 | omap_writew(0, lch_base + i); |
| 119 | } |
| 120 | |
| 121 | void omap_set_dma_priority(int dst_port, int priority) |
| 122 | { |
| 123 | unsigned long reg; |
| 124 | u32 l; |
| 125 | |
| 126 | switch (dst_port) { |
| 127 | case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */ |
| 128 | reg = OMAP_TC_OCPT1_PRIOR; |
| 129 | break; |
| 130 | case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */ |
| 131 | reg = OMAP_TC_OCPT2_PRIOR; |
| 132 | break; |
| 133 | case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */ |
| 134 | reg = OMAP_TC_EMIFF_PRIOR; |
| 135 | break; |
| 136 | case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */ |
| 137 | reg = OMAP_TC_EMIFS_PRIOR; |
| 138 | break; |
| 139 | default: |
| 140 | BUG(); |
| 141 | return; |
| 142 | } |
| 143 | l = omap_readl(reg); |
| 144 | l &= ~(0xf << 8); |
| 145 | l |= (priority & 0xf) << 8; |
| 146 | omap_writel(l, reg); |
| 147 | } |
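/*
 * For example (illustrative only), a driver could raise the EMIFF port
 * priority to its 4-bit maximum with:
 *
 *     omap_set_dma_priority(OMAP_DMA_PORT_EMIFF, 15);
 */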
| 148 | |
| 149 | void omap_set_dma_transfer_params(int lch, int data_type, int elem_count, |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 150 | int frame_count, int sync_mode, |
| 151 | int dma_trigger, int src_or_dst_synch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 152 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 153 | OMAP_DMA_CSDP_REG(lch) &= ~0x03; |
| 154 | OMAP_DMA_CSDP_REG(lch) |= data_type; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 155 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 156 | if (cpu_class_is_omap1()) { |
| 157 | OMAP_DMA_CCR_REG(lch) &= ~(1 << 5); |
| 158 | if (sync_mode == OMAP_DMA_SYNC_FRAME) |
| 159 | OMAP_DMA_CCR_REG(lch) |= 1 << 5; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 160 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 161 | OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2); |
| 162 | if (sync_mode == OMAP_DMA_SYNC_BLOCK) |
| 163 | OMAP1_DMA_CCR2_REG(lch) |= 1 << 2; |
| 164 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 165 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 166 | if (cpu_is_omap24xx() && dma_trigger) { |
| 167 | u32 val = OMAP_DMA_CCR_REG(lch); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 168 | |
Peter Ujfalusi | eca9e56 | 2006-06-26 16:16:06 -0700 | [diff] [blame] | 169 | val &= ~(3 << 19); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 170 | if (dma_trigger > 63) |
| 171 | val |= 1 << 20; |
| 172 | if (dma_trigger > 31) |
| 173 | val |= 1 << 19; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 174 | |
Peter Ujfalusi | eca9e56 | 2006-06-26 16:16:06 -0700 | [diff] [blame] | 175 | val &= ~(0x1f); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 176 | val |= (dma_trigger & 0x1f); |
| 177 | |
| 178 | if (sync_mode & OMAP_DMA_SYNC_FRAME) |
| 179 | val |= 1 << 5; |
Peter Ujfalusi | eca9e56 | 2006-06-26 16:16:06 -0700 | [diff] [blame] | 180 | else |
| 181 | val &= ~(1 << 5); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 182 | |
| 183 | if (sync_mode & OMAP_DMA_SYNC_BLOCK) |
| 184 | val |= 1 << 18; |
Peter Ujfalusi | eca9e56 | 2006-06-26 16:16:06 -0700 | [diff] [blame] | 185 | else |
| 186 | val &= ~(1 << 18); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 187 | |
| 188 | if (src_or_dst_synch) |
| 189 | val |= 1 << 24; /* source synch */ |
| 190 | else |
| 191 | val &= ~(1 << 24); /* dest synch */ |
| 192 | |
| 193 | OMAP_DMA_CCR_REG(lch) = val; |
| 194 | } |
| 195 | |
| 196 | OMAP_DMA_CEN_REG(lch) = elem_count; |
| 197 | OMAP_DMA_CFN_REG(lch) = frame_count; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 198 | } |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 199 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 200 | void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color) |
| 201 | { |
| 202 | u16 w; |
| 203 | |
| 204 | BUG_ON(omap_dma_in_1510_mode()); |
| 205 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 206 | if (cpu_is_omap24xx()) { |
| 207 | REVISIT_24XX(); |
| 208 | return; |
| 209 | } |
| 210 | |
| 211 | w = OMAP1_DMA_CCR2_REG(lch) & ~0x03; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 212 | switch (mode) { |
| 213 | case OMAP_DMA_CONSTANT_FILL: |
| 214 | w |= 0x01; |
| 215 | break; |
| 216 | case OMAP_DMA_TRANSPARENT_COPY: |
| 217 | w |= 0x02; |
| 218 | break; |
| 219 | case OMAP_DMA_COLOR_DIS: |
| 220 | break; |
| 221 | default: |
| 222 | BUG(); |
| 223 | } |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 224 | OMAP1_DMA_CCR2_REG(lch) = w; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 225 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 226 | w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 227 | /* Default is channel type 2D */ |
| 228 | if (mode) { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 229 | OMAP1_DMA_COLOR_L_REG(lch) = (u16)color; |
| 230 | OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 231 | w |= 1; /* Channel type G */ |
| 232 | } |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 233 | OMAP1_DMA_LCH_CTRL_REG(lch) = w; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 234 | } |
| 235 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 236 | /* Note that src_port is only for OMAP1 */ |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 237 | void omap_set_dma_src_params(int lch, int src_port, int src_amode, |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 238 | unsigned long src_start, |
| 239 | int src_ei, int src_fi) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 240 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 241 | if (cpu_class_is_omap1()) { |
| 242 | OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2); |
| 243 | OMAP_DMA_CSDP_REG(lch) |= src_port << 2; |
| 244 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 245 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 246 | OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12); |
| 247 | OMAP_DMA_CCR_REG(lch) |= src_amode << 12; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 248 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 249 | if (cpu_class_is_omap1()) { |
| 250 | OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16; |
| 251 | OMAP1_DMA_CSSA_L_REG(lch) = src_start; |
| 252 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 253 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 254 | if (cpu_is_omap24xx()) |
| 255 | OMAP2_DMA_CSSA_REG(lch) = src_start; |
| 256 | |
| 257 | OMAP_DMA_CSEI_REG(lch) = src_ei; |
| 258 | OMAP_DMA_CSFI_REG(lch) = src_fi; |
| 259 | } |
| 260 | |
| 261 | void omap_set_dma_params(int lch, struct omap_dma_channel_params * params) |
| 262 | { |
| 263 | omap_set_dma_transfer_params(lch, params->data_type, |
| 264 | params->elem_count, params->frame_count, |
| 265 | params->sync_mode, params->trigger, |
| 266 | params->src_or_dst_synch); |
| 267 | omap_set_dma_src_params(lch, params->src_port, |
| 268 | params->src_amode, params->src_start, |
| 269 | params->src_ei, params->src_fi); |
| 270 | |
| 271 | omap_set_dma_dest_params(lch, params->dst_port, |
| 272 | params->dst_amode, params->dst_start, |
| 273 | params->dst_ei, params->dst_fi); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 274 | } |
| 275 | |
| 276 | void omap_set_dma_src_index(int lch, int eidx, int fidx) |
| 277 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 278 | if (cpu_is_omap24xx()) { |
| 279 | REVISIT_24XX(); |
| 280 | return; |
| 281 | } |
| 282 | OMAP_DMA_CSEI_REG(lch) = eidx; |
| 283 | OMAP_DMA_CSFI_REG(lch) = fidx; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 284 | } |
| 285 | |
| 286 | void omap_set_dma_src_data_pack(int lch, int enable) |
| 287 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 288 | OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6); |
| 289 | if (enable) |
| 290 | OMAP_DMA_CSDP_REG(lch) |= (1 << 6); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 291 | } |
| 292 | |
| 293 | void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) |
| 294 | { |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 295 | unsigned int burst = 0; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 296 | OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 297 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 298 | switch (burst_mode) { |
| 299 | case OMAP_DMA_DATA_BURST_DIS: |
| 300 | break; |
| 301 | case OMAP_DMA_DATA_BURST_4: |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 302 | if (cpu_is_omap24xx()) |
| 303 | burst = 0x1; |
| 304 | else |
| 305 | burst = 0x2; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 306 | break; |
| 307 | case OMAP_DMA_DATA_BURST_8: |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 308 | if (cpu_is_omap24xx()) { |
| 309 | burst = 0x2; |
| 310 | break; |
| 311 | } |
| 312 | /* not supported by current hardware on OMAP1 |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 313 | * (would be burst = 0x3) |
| 314 | * fall through |
| 315 | */ |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 316 | case OMAP_DMA_DATA_BURST_16: |
| 317 | if (cpu_is_omap24xx()) { |
| 318 | burst = 0x3; |
| 319 | break; |
| 320 | } |
| 321 | /* OMAP1 doesn't support burst 16 |
| 322 | * fall through |
| 323 | */ |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 324 | default: |
| 325 | BUG(); |
| 326 | } |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 327 | OMAP_DMA_CSDP_REG(lch) |= (burst << 7); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 328 | } |
| 329 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 330 | /* Note that dest_port is only for OMAP1 */ |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 331 | void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 332 | unsigned long dest_start, |
| 333 | int dst_ei, int dst_fi) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 334 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 335 | if (cpu_class_is_omap1()) { |
| 336 | OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9); |
| 337 | OMAP_DMA_CSDP_REG(lch) |= dest_port << 9; |
| 338 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 339 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 340 | OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14); |
| 341 | OMAP_DMA_CCR_REG(lch) |= dest_amode << 14; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 342 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 343 | if (cpu_class_is_omap1()) { |
| 344 | OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16; |
| 345 | OMAP1_DMA_CDSA_L_REG(lch) = dest_start; |
| 346 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 347 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 348 | if (cpu_is_omap24xx()) |
| 349 | OMAP2_DMA_CDSA_REG(lch) = dest_start; |
| 350 | |
| 351 | OMAP_DMA_CDEI_REG(lch) = dst_ei; |
| 352 | OMAP_DMA_CDFI_REG(lch) = dst_fi; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 353 | } |
| 354 | |
| 355 | void omap_set_dma_dest_index(int lch, int eidx, int fidx) |
| 356 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 357 | if (cpu_is_omap24xx()) { |
| 358 | REVISIT_24XX(); |
| 359 | return; |
| 360 | } |
| 361 | OMAP_DMA_CDEI_REG(lch) = eidx; |
| 362 | OMAP_DMA_CDFI_REG(lch) = fidx; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 363 | } |
| 364 | |
| 365 | void omap_set_dma_dest_data_pack(int lch, int enable) |
| 366 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 367 | OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13); |
| 368 | if (enable) |
| 369 | OMAP_DMA_CSDP_REG(lch) |= 1 << 13; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 370 | } |
| 371 | |
| 372 | void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) |
| 373 | { |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 374 | unsigned int burst = 0; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 375 | OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 376 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 377 | switch (burst_mode) { |
| 378 | case OMAP_DMA_DATA_BURST_DIS: |
| 379 | break; |
| 380 | case OMAP_DMA_DATA_BURST_4: |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 381 | if (cpu_is_omap24xx()) |
| 382 | burst = 0x1; |
| 383 | else |
| 384 | burst = 0x2; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 385 | break; |
| 386 | case OMAP_DMA_DATA_BURST_8: |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 387 | if (cpu_is_omap24xx()) |
| 388 | burst = 0x2; |
| 389 | else |
| 390 | burst = 0x3; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 391 | break; |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 392 | case OMAP_DMA_DATA_BURST_16: |
| 393 | if (cpu_is_omap24xx()) { |
| 394 | burst = 0x3; |
| 395 | break; |
| 396 | } |
| 397 | /* OMAP1 doesn't support burst 16 |
| 398 | * fall through |
| 399 | */ |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 400 | default: |
| 401 | printk(KERN_ERR "Invalid DMA burst mode\n"); |
| 402 | BUG(); |
| 403 | return; |
| 404 | } |
Kyungmin Park | 6dc3c8f | 2006-06-26 16:16:14 -0700 | [diff] [blame^] | 405 | OMAP_DMA_CSDP_REG(lch) |= (burst << 14); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 406 | } |
| 407 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 408 | static inline void omap_enable_channel_irq(int lch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 409 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 410 | u32 status; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 411 | |
| 412 | /* Read CSR to make sure it's cleared. */ |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 413 | status = OMAP_DMA_CSR_REG(lch); |
| 414 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 415 | /* Enable some nice interrupts. */ |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 416 | OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs; |
| 417 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 418 | dma_chan[lch].flags |= OMAP_DMA_ACTIVE; |
| 419 | } |
| 420 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 421 | static void omap_disable_channel_irq(int lch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 422 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 423 | if (cpu_is_omap24xx()) |
| 424 | OMAP_DMA_CICR_REG(lch) = 0; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 425 | } |
| 426 | |
| 427 | void omap_enable_dma_irq(int lch, u16 bits) |
| 428 | { |
| 429 | dma_chan[lch].enabled_irqs |= bits; |
| 430 | } |
| 431 | |
| 432 | void omap_disable_dma_irq(int lch, u16 bits) |
| 433 | { |
| 434 | dma_chan[lch].enabled_irqs &= ~bits; |
| 435 | } |
| 436 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 437 | static inline void enable_lnk(int lch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 438 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 439 | if (cpu_class_is_omap1()) |
| 440 | OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 441 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 442 | /* Set the ENABLE_LNK bits */ |
| 443 | if (dma_chan[lch].next_lch != -1) |
| 444 | OMAP_DMA_CLNK_CTRL_REG(lch) = |
| 445 | dma_chan[lch].next_lch | (1 << 15); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 446 | } |
| 447 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 448 | static inline void disable_lnk(int lch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 449 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 450 | /* Disable interrupts */ |
| 451 | if (cpu_class_is_omap1()) { |
| 452 | OMAP_DMA_CICR_REG(lch) = 0; |
| 453 | /* Set the STOP_LNK bit */ |
| 454 | OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 455 | } |
| 456 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 457 | if (cpu_is_omap24xx()) { |
| 458 | omap_disable_channel_irq(lch); |
| 459 | /* Clear the ENABLE_LNK bit */ |
| 460 | OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15); |
| 461 | } |
| 462 | |
| 463 | dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; |
| 464 | } |
| 465 | |
| 466 | static inline void omap2_enable_irq_lch(int lch) |
| 467 | { |
| 468 | u32 val; |
| 469 | |
| 470 | if (!cpu_is_omap24xx()) |
| 471 | return; |
| 472 | |
| 473 | val = omap_readl(OMAP_DMA4_IRQENABLE_L0); |
| 474 | val |= 1 << lch; |
| 475 | omap_writel(val, OMAP_DMA4_IRQENABLE_L0); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 476 | } |
| 477 | |
| 478 | int omap_request_dma(int dev_id, const char *dev_name, |
| 479 | void (* callback)(int lch, u16 ch_status, void *data), |
| 480 | void *data, int *dma_ch_out) |
| 481 | { |
| 482 | int ch, free_ch = -1; |
| 483 | unsigned long flags; |
| 484 | struct omap_dma_lch *chan; |
| 485 | |
| 486 | spin_lock_irqsave(&dma_chan_lock, flags); |
| 487 | for (ch = 0; ch < dma_chan_count; ch++) { |
| 488 | if (free_ch == -1 && dma_chan[ch].dev_id == -1) { |
| 489 | free_ch = ch; |
| 490 | if (dev_id == 0) |
| 491 | break; |
| 492 | } |
| 493 | } |
| 494 | if (free_ch == -1) { |
| 495 | spin_unlock_irqrestore(&dma_chan_lock, flags); |
| 496 | return -EBUSY; |
| 497 | } |
| 498 | chan = dma_chan + free_ch; |
| 499 | chan->dev_id = dev_id; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 500 | |
| 501 | if (cpu_class_is_omap1()) |
| 502 | clear_lch_regs(free_ch); |
| 503 | |
| 504 | if (cpu_is_omap24xx()) |
| 505 | omap_clear_dma(free_ch); |
| 506 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 507 | spin_unlock_irqrestore(&dma_chan_lock, flags); |
| 508 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 509 | chan->dev_name = dev_name; |
| 510 | chan->callback = callback; |
| 511 | chan->data = data; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 512 | chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | |
| 513 | OMAP_DMA_BLOCK_IRQ; |
| 514 | |
| 515 | if (cpu_is_omap24xx()) |
| 516 | chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 517 | |
| 518 | if (cpu_is_omap16xx()) { |
| 519 | /* If the sync device is set, configure it dynamically. */ |
| 520 | if (dev_id != 0) { |
| 521 | set_gdma_dev(free_ch + 1, dev_id); |
| 522 | dev_id = free_ch + 1; |
| 523 | } |
| 524 | /* Disable the 1510 compatibility mode and set the sync device |
| 525 | * id. */ |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 526 | OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10); |
| 527 | } else if (cpu_is_omap730() || cpu_is_omap15xx()) { |
| 528 | OMAP_DMA_CCR_REG(free_ch) = dev_id; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 529 | } |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 530 | |
| 531 | if (cpu_is_omap24xx()) { |
| 532 | omap2_enable_irq_lch(free_ch); |
| 533 | |
| 534 | omap_enable_channel_irq(free_ch); |
| 535 | /* Clear the CSR register and IRQ status register */ |
| 536 | OMAP_DMA_CSR_REG(free_ch) = 0x0; |
| 537 | omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0); |
| 538 | } |
| 539 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 540 | *dma_ch_out = free_ch; |
| 541 | |
| 542 | return 0; |
| 543 | } |
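/*
 * Illustrative driver-side usage of the channel API above.  This is only a
 * hedged sketch: my_dev_id, my_dma_cb, my_data, elem_count, frame_count,
 * src_phys and dst_phys are hypothetical, and the OMAP_DMA_* parameter
 * constants come from the platform DMA header:
 *
 *     static void my_dma_cb(int lch, u16 ch_status, void *data)
 *     {
 *             ...check ch_status and notify whoever started the transfer...
 *     }
 *
 *     int lch, r;
 *
 *     r = omap_request_dma(my_dev_id, "mydev", my_dma_cb, my_data, &lch);
 *     if (r)
 *             return r;
 *     omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16, elem_count,
 *                                  frame_count, OMAP_DMA_SYNC_FRAME,
 *                                  my_dev_id, 0);
 *     omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
 *                             OMAP_DMA_AMODE_POST_INC, src_phys, 0, 0);
 *     omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
 *                              OMAP_DMA_AMODE_CONSTANT, dst_phys, 0, 0);
 *     omap_start_dma(lch);
 *     ...wait for my_dma_cb(), e.g. on a completion...
 *     omap_free_dma(lch);
 */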
| 544 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 545 | void omap_free_dma(int lch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 546 | { |
| 547 | unsigned long flags; |
| 548 | |
| 549 | spin_lock_irqsave(&dma_chan_lock, flags); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 550 | if (dma_chan[lch].dev_id == -1) { |
| 551 | printk("omap_dma: trying to free nonallocated DMA channel %d\n", |
| 552 | lch); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 553 | spin_unlock_irqrestore(&dma_chan_lock, flags); |
| 554 | return; |
| 555 | } |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 556 | dma_chan[lch].dev_id = -1; |
| 557 | dma_chan[lch].next_lch = -1; |
| 558 | dma_chan[lch].callback = NULL; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 559 | spin_unlock_irqrestore(&dma_chan_lock, flags); |
| 560 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 561 | if (cpu_class_is_omap1()) { |
| 562 | /* Disable all DMA interrupts for the channel. */ |
| 563 | OMAP_DMA_CICR_REG(lch) = 0; |
| 564 | /* Make sure the DMA transfer is stopped. */ |
| 565 | OMAP_DMA_CCR_REG(lch) = 0; |
| 566 | } |
| 567 | |
| 568 | if (cpu_is_omap24xx()) { |
| 569 | u32 val; |
| 570 | /* Disable interrupts */ |
| 571 | val = omap_readl(OMAP_DMA4_IRQENABLE_L0); |
| 572 | val &= ~(1 << lch); |
| 573 | omap_writel(val, OMAP_DMA4_IRQENABLE_L0); |
| 574 | |
| 575 | /* Clear the CSR register and IRQ status register */ |
| 576 | OMAP_DMA_CSR_REG(lch) = 0x0; |
| 577 | |
| 578 | val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); |
| 579 | val |= 1 << lch; |
| 580 | omap_writel(val, OMAP_DMA4_IRQSTATUS_L0); |
| 581 | |
| 582 | /* Disable all DMA interrupts for the channel. */ |
| 583 | OMAP_DMA_CICR_REG(lch) = 0; |
| 584 | |
| 585 | /* Make sure the DMA transfer is stopped. */ |
| 586 | OMAP_DMA_CCR_REG(lch) = 0; |
| 587 | omap_clear_dma(lch); |
| 588 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 589 | } |
| 590 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 591 | /* |
| 592 | * Clears any DMA state so the DMA engine is ready to restart with new buffers |
| 593 | * through omap_start_dma(). Any buffers in flight are discarded. |
| 594 | */ |
| 595 | void omap_clear_dma(int lch) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 596 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 597 | unsigned long flags; |
| 598 | |
| 599 | local_irq_save(flags); |
| 600 | |
| 601 | if (cpu_class_is_omap1()) { |
| 602 | int status; |
| 603 | OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN; |
| 604 | |
| 605 | /* Clear pending interrupts */ |
| 606 | status = OMAP_DMA_CSR_REG(lch); |
| 607 | } |
| 608 | |
| 609 | if (cpu_is_omap24xx()) { |
| 610 | int i; |
| 611 | u32 lch_base = OMAP24XX_DMA_BASE + lch * 0x60 + 0x80; |
| 612 | for (i = 0; i < 0x44; i += 4) |
| 613 | omap_writel(0, lch_base + i); |
| 614 | } |
| 615 | |
| 616 | local_irq_restore(flags); |
| 617 | } |
| 618 | |
| 619 | void omap_start_dma(int lch) |
| 620 | { |
| 621 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { |
| 622 | int next_lch, cur_lch; |
| 623 | char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT]; |
| 624 | |
| 625 | dma_chan_link_map[lch] = 1; |
| 626 | /* Set the link register of the first channel */ |
| 627 | enable_lnk(lch); |
| 628 | |
| 629 | memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); |
| 630 | cur_lch = dma_chan[lch].next_lch; |
| 631 | do { |
| 632 | next_lch = dma_chan[cur_lch].next_lch; |
| 633 | |
| 634 | /* The loop case: we've been here already */ |
| 635 | if (dma_chan_link_map[cur_lch]) |
| 636 | break; |
| 637 | /* Mark the current channel */ |
| 638 | dma_chan_link_map[cur_lch] = 1; |
| 639 | |
| 640 | enable_lnk(cur_lch); |
| 641 | omap_enable_channel_irq(cur_lch); |
| 642 | |
| 643 | cur_lch = next_lch; |
| 644 | } while (next_lch != -1); |
| 645 | } else if (cpu_is_omap24xx()) { |
| 646 | /* Errata: Need to write lch even if not using chaining */ |
| 647 | OMAP_DMA_CLNK_CTRL_REG(lch) = lch; |
| 648 | } |
| 649 | |
| 650 | omap_enable_channel_irq(lch); |
| 651 | |
| 652 | /* Errata: On ES2.0 BUFFERING disable must be set. |
| 653 | * This will always fail on ES1.0 */ |
| 654 | if (cpu_is_omap24xx()) { |
| 655 | OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN; |
| 656 | } |
| 657 | |
| 658 | OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN; |
| 659 | |
| 660 | dma_chan[lch].flags |= OMAP_DMA_ACTIVE; |
| 661 | } |
| 662 | |
| 663 | void omap_stop_dma(int lch) |
| 664 | { |
| 665 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { |
| 666 | int next_lch, cur_lch = lch; |
| 667 | char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT]; |
| 668 | |
| 669 | memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); |
| 670 | do { |
| 671 | /* The loop case: we've been here already */ |
| 672 | if (dma_chan_link_map[cur_lch]) |
| 673 | break; |
| 674 | /* Mark the current channel */ |
| 675 | dma_chan_link_map[cur_lch] = 1; |
| 676 | |
| 677 | disable_lnk(cur_lch); |
| 678 | |
| 679 | next_lch = dma_chan[cur_lch].next_lch; |
| 680 | cur_lch = next_lch; |
| 681 | } while (next_lch != -1); |
| 682 | |
| 683 | return; |
| 684 | } |
| 685 | |
| 686 | /* Disable all interrupts on the channel */ |
| 687 | if (cpu_class_is_omap1()) |
| 688 | OMAP_DMA_CICR_REG(lch) = 0; |
| 689 | |
| 690 | OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN; |
| 691 | dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; |
| 692 | } |
| 693 | |
| 694 | /* |
| 695 | * Returns the current physical source address for the given DMA channel. |
| 696 | * If the channel is running, the caller must disable interrupts prior to |
| 697 | * calling this function and process the returned value before re-enabling |
| 698 | * interrupts, to prevent races with the interrupt handler. Note that in |
| 699 | * continuous mode there is a chance of a CSSA_L register overflow in between |
| 700 | * the two reads, resulting in an incorrect return value. |
| 701 | */ |
| 702 | dma_addr_t omap_get_dma_src_pos(int lch) |
| 703 | { |
| 704 | dma_addr_t offset; |
| 705 | |
| 706 | if (cpu_class_is_omap1()) |
| 707 | offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) | |
| 708 | (OMAP1_DMA_CSSA_U_REG(lch) << 16)); |
| 709 | |
| 710 | if (cpu_is_omap24xx()) |
| 711 | offset = OMAP_DMA_CSAC_REG(lch); |
| 712 | |
| 713 | return offset; |
| 714 | } |
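/*
 * A minimal sketch of the locking the comment above asks for (assuming the
 * caller is happy with a plain local_irq_save/restore pair around the read):
 *
 *     unsigned long flags;
 *     dma_addr_t pos;
 *
 *     local_irq_save(flags);
 *     pos = omap_get_dma_src_pos(lch);
 *     ...use pos before anything can run the DMA interrupt handler...
 *     local_irq_restore(flags);
 */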
| 715 | |
| 716 | /* |
| 717 | * Returns the current physical destination address for the given DMA channel. |
| 718 | * If the channel is running, the caller must disable interrupts prior to |
| 719 | * calling this function and process the returned value before re-enabling |
| 720 | * interrupts, to prevent races with the interrupt handler. Note that in |
| 721 | * continuous mode there is a chance of a CDSA_L register overflow in between |
| 722 | * the two reads, resulting in an incorrect return value. |
| 723 | */ |
| 724 | dma_addr_t omap_get_dma_dst_pos(int lch) |
| 725 | { |
| 726 | dma_addr_t offset; |
| 727 | |
| 728 | if (cpu_class_is_omap1()) |
| 729 | offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) | |
| 730 | (OMAP1_DMA_CDSA_U_REG(lch) << 16)); |
| 731 | |
| 732 | if (cpu_is_omap24xx()) |
| 733 | offset = OMAP2_DMA_CDSA_REG(lch); |
| 734 | |
| 735 | return offset; |
| 736 | } |
| 737 | |
| 738 | /* |
| 739 | * Returns the current source transfer count for the given DMA channel. |
| 740 | * Can be used to monitor the progress of a transfer inside a block. |
| 741 | * It must be called with interrupts disabled. |
| 742 | */ |
| 743 | int omap_get_dma_src_addr_counter(int lch) |
| 744 | { |
| 745 | return (dma_addr_t) OMAP_DMA_CSAC_REG(lch); |
| 746 | } |
| 747 | |
| 748 | int omap_dma_running(void) |
| 749 | { |
| 750 | int lch; |
| 751 | |
| 752 | /* Check if LCD DMA is running */ |
| 753 | if (cpu_is_omap16xx()) |
| 754 | if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN) |
| 755 | return 1; |
| 756 | |
| 757 | for (lch = 0; lch < dma_chan_count; lch++) |
| 758 | if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN) |
| 759 | return 1; |
| 760 | |
| 761 | return 0; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 762 | } |
| 763 | |
| 764 | /* |
| 765 | * The lch_queue DMA will start right after the lch_head one finishes. |
| 766 | * For this DMA link to start, you still need to start the first channel |
| 767 | * (see omap_start_dma); that will fire up the entire queue. |
| 768 | */ |
| 769 | void omap_dma_link_lch (int lch_head, int lch_queue) |
| 770 | { |
| 771 | if (omap_dma_in_1510_mode()) { |
| 772 | printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); |
| 773 | BUG(); |
| 774 | return; |
| 775 | } |
| 776 | |
| 777 | if ((dma_chan[lch_head].dev_id == -1) || |
| 778 | (dma_chan[lch_queue].dev_id == -1)) { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 779 | printk(KERN_ERR "omap_dma: trying to link " |
| 780 | "non requested channels\n"); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 781 | dump_stack(); |
| 782 | } |
| 783 | |
| 784 | dma_chan[lch_head].next_lch = lch_queue; |
| 785 | } |
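/*
 * Hedged example of chaining two already-requested channels lch_a and lch_b
 * (hypothetical names), per the comment above: only the head channel is
 * started explicitly, the linked one follows automatically.
 *
 *     omap_dma_link_lch(lch_a, lch_b);
 *     ...program transfer/src/dest params for both channels...
 *     omap_start_dma(lch_a);
 *     ...
 *     omap_stop_dma(lch_a);
 *     omap_dma_unlink_lch(lch_a, lch_b);
 */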
| 786 | |
| 787 | /* |
| 788 | * Once the DMA queue is stopped, we can destroy it. |
| 789 | */ |
| 790 | void omap_dma_unlink_lch (int lch_head, int lch_queue) |
| 791 | { |
| 792 | if (omap_dma_in_1510_mode()) { |
| 793 | printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); |
| 794 | BUG(); |
| 795 | return; |
| 796 | } |
| 797 | |
| 798 | if (dma_chan[lch_head].next_lch != lch_queue || |
| 799 | dma_chan[lch_head].next_lch == -1) { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 800 | printk(KERN_ERR "omap_dma: trying to unlink " |
| 801 | "non linked channels\n"); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 802 | dump_stack(); |
| 803 | } |
| 804 | |
| 805 | |
| 806 | if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) || |
| 807 | (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 808 | printk(KERN_ERR "omap_dma: You need to stop the DMA channels " |
| 809 | "before unlinking\n"); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 810 | dump_stack(); |
| 811 | } |
| 812 | |
| 813 | dma_chan[lch_head].next_lch = -1; |
| 814 | } |
| 815 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 816 | /*----------------------------------------------------------------------------*/ |
| 817 | |
| 818 | #ifdef CONFIG_ARCH_OMAP1 |
| 819 | |
| 820 | static int omap1_dma_handle_ch(int ch) |
| 821 | { |
| 822 | u16 csr; |
| 823 | |
| 824 | if (enable_1510_mode && ch >= 6) { |
| 825 | csr = dma_chan[ch].saved_csr; |
| 826 | dma_chan[ch].saved_csr = 0; |
| 827 | } else |
| 828 | csr = OMAP_DMA_CSR_REG(ch); |
| 829 | if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { |
| 830 | dma_chan[ch + 6].saved_csr = csr >> 7; |
| 831 | csr &= 0x7f; |
| 832 | } |
| 833 | if ((csr & 0x3f) == 0) |
| 834 | return 0; |
| 835 | if (unlikely(dma_chan[ch].dev_id == -1)) { |
| 836 | printk(KERN_WARNING "Spurious interrupt from DMA channel " |
| 837 | "%d (CSR %04x)\n", ch, csr); |
| 838 | return 0; |
| 839 | } |
| 840 | if (unlikely(csr & OMAP_DMA_TOUT_IRQ)) |
| 841 | printk(KERN_WARNING "DMA timeout with device %d\n", |
| 842 | dma_chan[ch].dev_id); |
| 843 | if (unlikely(csr & OMAP_DMA_DROP_IRQ)) |
| 844 | printk(KERN_WARNING "DMA synchronization event drop occurred " |
| 845 | "with device %d\n", dma_chan[ch].dev_id); |
| 846 | if (likely(csr & OMAP_DMA_BLOCK_IRQ)) |
| 847 | dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; |
| 848 | if (likely(dma_chan[ch].callback != NULL)) |
| 849 | dma_chan[ch].callback(ch, csr, dma_chan[ch].data); |
| 850 | return 1; |
| 851 | } |
| 852 | |
| 853 | static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id, |
| 854 | struct pt_regs *regs) |
| 855 | { |
| 856 | int ch = ((int) dev_id) - 1; |
| 857 | int handled = 0; |
| 858 | |
| 859 | for (;;) { |
| 860 | int handled_now = 0; |
| 861 | |
| 862 | handled_now += omap1_dma_handle_ch(ch); |
| 863 | if (enable_1510_mode && dma_chan[ch + 6].saved_csr) |
| 864 | handled_now += omap1_dma_handle_ch(ch + 6); |
| 865 | if (!handled_now) |
| 866 | break; |
| 867 | handled += handled_now; |
| 868 | } |
| 869 | |
| 870 | return handled ? IRQ_HANDLED : IRQ_NONE; |
| 871 | } |
| 872 | |
| 873 | #else |
| 874 | #define omap1_dma_irq_handler NULL |
| 875 | #endif |
| 876 | |
| 877 | #ifdef CONFIG_ARCH_OMAP2 |
| 878 | |
| 879 | static int omap2_dma_handle_ch(int ch) |
| 880 | { |
| 881 | u32 status = OMAP_DMA_CSR_REG(ch); |
| 882 | u32 val; |
| 883 | |
| 884 | if (!status) |
| 885 | return 0; |
| 886 | if (unlikely(dma_chan[ch].dev_id == -1)) |
| 887 | return 0; |
| 888 | /* REVISIT: According to 24xx TRM, there's no TOUT_IE */ |
| 889 | if (unlikely(status & OMAP_DMA_TOUT_IRQ)) |
| 890 | printk(KERN_INFO "DMA timeout with device %d\n", |
| 891 | dma_chan[ch].dev_id); |
| 892 | if (unlikely(status & OMAP_DMA_DROP_IRQ)) |
| 893 | printk(KERN_INFO |
| 894 | "DMA synchronization event drop occurred with device " |
| 895 | "%d\n", dma_chan[ch].dev_id); |
| 896 | |
| 897 | if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) |
| 898 | printk(KERN_INFO "DMA transaction error with device %d\n", |
| 899 | dma_chan[ch].dev_id); |
| 900 | |
| 901 | OMAP_DMA_CSR_REG(ch) = 0x20; |
| 902 | |
| 903 | val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); |
| 904 | /* ch in this function is from 0-31 while in the register it is 1-32 */ |
| 905 | val = 1 << (ch); |
| 906 | omap_writel(val, OMAP_DMA4_IRQSTATUS_L0); |
| 907 | |
| 908 | if (likely(dma_chan[ch].callback != NULL)) |
| 909 | dma_chan[ch].callback(ch, status, dma_chan[ch].data); |
| 910 | |
| 911 | return 0; |
| 912 | } |
| 913 | |
| 914 | /* STATUS register count is from 1-32 while ours is 0-31 */ |
| 915 | static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id, |
| 916 | struct pt_regs *regs) |
| 917 | { |
| 918 | u32 val; |
| 919 | int i; |
| 920 | |
| 921 | val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); |
| 922 | |
| 923 | for (i = 1; i <= OMAP_LOGICAL_DMA_CH_COUNT; i++) { |
| 924 | int active = val & (1 << (i - 1)); |
| 925 | if (active) |
| 926 | omap2_dma_handle_ch(i - 1); |
| 927 | } |
| 928 | |
| 929 | return IRQ_HANDLED; |
| 930 | } |
| 931 | |
| 932 | static struct irqaction omap24xx_dma_irq = { |
| 933 | .name = "DMA", |
| 934 | .handler = omap2_dma_irq_handler, |
| 935 | .flags = SA_INTERRUPT |
| 936 | }; |
| 937 | |
| 938 | #else |
| 939 | static struct irqaction omap24xx_dma_irq; |
| 940 | #endif |
| 941 | |
| 942 | /*----------------------------------------------------------------------------*/ |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 943 | |
| 944 | static struct lcd_dma_info { |
| 945 | spinlock_t lock; |
| 946 | int reserved; |
| 947 | void (* callback)(u16 status, void *data); |
| 948 | void *cb_data; |
| 949 | |
| 950 | int active; |
| 951 | unsigned long addr, size; |
| 952 | int rotate, data_type, xres, yres; |
| 953 | int vxres; |
| 954 | int mirror; |
| 955 | int xscale, yscale; |
| 956 | int ext_ctrl; |
| 957 | int src_port; |
| 958 | int single_transfer; |
| 959 | } lcd_dma; |
| 960 | |
| 961 | void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres, |
| 962 | int data_type) |
| 963 | { |
| 964 | lcd_dma.addr = addr; |
| 965 | lcd_dma.data_type = data_type; |
| 966 | lcd_dma.xres = fb_xres; |
| 967 | lcd_dma.yres = fb_yres; |
| 968 | } |
| 969 | |
| 970 | void omap_set_lcd_dma_src_port(int port) |
| 971 | { |
| 972 | lcd_dma.src_port = port; |
| 973 | } |
| 974 | |
| 975 | void omap_set_lcd_dma_ext_controller(int external) |
| 976 | { |
| 977 | lcd_dma.ext_ctrl = external; |
| 978 | } |
| 979 | |
| 980 | void omap_set_lcd_dma_single_transfer(int single) |
| 981 | { |
| 982 | lcd_dma.single_transfer = single; |
| 983 | } |
| 984 | |
| 985 | |
| 986 | void omap_set_lcd_dma_b1_rotation(int rotate) |
| 987 | { |
| 988 | if (omap_dma_in_1510_mode()) { |
| 989 | printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); |
| 990 | BUG(); |
| 991 | return; |
| 992 | } |
| 993 | lcd_dma.rotate = rotate; |
| 994 | } |
| 995 | |
| 996 | void omap_set_lcd_dma_b1_mirror(int mirror) |
| 997 | { |
| 998 | if (omap_dma_in_1510_mode()) { |
| 999 | printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); |
| 1000 | BUG(); |
| 1001 | } |
| 1002 | lcd_dma.mirror = mirror; |
| 1003 | } |
| 1004 | |
| 1005 | void omap_set_lcd_dma_b1_vxres(unsigned long vxres) |
| 1006 | { |
| 1007 | if (omap_dma_in_1510_mode()) { |
| 1008 | printk(KERN_ERR "DMA virtual resulotion is not supported " |
| 1009 | "in 1510 mode\n"); |
| 1010 | BUG(); |
| 1011 | } |
| 1012 | lcd_dma.vxres = vxres; |
| 1013 | } |
| 1014 | |
| 1015 | void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) |
| 1016 | { |
| 1017 | if (omap_dma_in_1510_mode()) { |
| 1018 | printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); |
| 1019 | BUG(); |
| 1020 | } |
| 1021 | lcd_dma.xscale = xscale; |
| 1022 | lcd_dma.yscale = yscale; |
| 1023 | } |
| 1024 | |
| 1025 | static void set_b1_regs(void) |
| 1026 | { |
| 1027 | unsigned long top, bottom; |
| 1028 | int es; |
| 1029 | u16 w; |
| 1030 | unsigned long en, fn; |
| 1031 | long ei, fi; |
| 1032 | unsigned long vxres; |
| 1033 | unsigned int xscale, yscale; |
| 1034 | |
| 1035 | switch (lcd_dma.data_type) { |
| 1036 | case OMAP_DMA_DATA_TYPE_S8: |
| 1037 | es = 1; |
| 1038 | break; |
| 1039 | case OMAP_DMA_DATA_TYPE_S16: |
| 1040 | es = 2; |
| 1041 | break; |
| 1042 | case OMAP_DMA_DATA_TYPE_S32: |
| 1043 | es = 4; |
| 1044 | break; |
| 1045 | default: |
| 1046 | BUG(); |
| 1047 | return; |
| 1048 | } |
| 1049 | |
| 1050 | vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres; |
| 1051 | xscale = lcd_dma.xscale ? lcd_dma.xscale : 1; |
| 1052 | yscale = lcd_dma.yscale ? lcd_dma.yscale : 1; |
| 1053 | BUG_ON(vxres < lcd_dma.xres); |
| 1054 | #define PIXADDR(x,y) (lcd_dma.addr + ((y) * vxres * yscale + (x) * xscale) * es) |
| 1055 | #define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1) |
| 1056 | switch (lcd_dma.rotate) { |
| 1057 | case 0: |
| 1058 | if (!lcd_dma.mirror) { |
| 1059 | top = PIXADDR(0, 0); |
| 1060 | bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); |
| 1061 | /* 1510 DMA requires the bottom address to be 2 more |
| 1062 | * than the actual last memory access location. */ |
| 1063 | if (omap_dma_in_1510_mode() && |
| 1064 | lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) |
| 1065 | bottom += 2; |
| 1066 | ei = PIXSTEP(0, 0, 1, 0); |
| 1067 | fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1); |
| 1068 | } else { |
| 1069 | top = PIXADDR(lcd_dma.xres - 1, 0); |
| 1070 | bottom = PIXADDR(0, lcd_dma.yres - 1); |
| 1071 | ei = PIXSTEP(1, 0, 0, 0); |
| 1072 | fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1); |
| 1073 | } |
| 1074 | en = lcd_dma.xres; |
| 1075 | fn = lcd_dma.yres; |
| 1076 | break; |
| 1077 | case 90: |
| 1078 | if (!lcd_dma.mirror) { |
| 1079 | top = PIXADDR(0, lcd_dma.yres - 1); |
| 1080 | bottom = PIXADDR(lcd_dma.xres - 1, 0); |
| 1081 | ei = PIXSTEP(0, 1, 0, 0); |
| 1082 | fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1); |
| 1083 | } else { |
| 1084 | top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); |
| 1085 | bottom = PIXADDR(0, 0); |
| 1086 | ei = PIXSTEP(0, 1, 0, 0); |
| 1087 | fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1); |
| 1088 | } |
| 1089 | en = lcd_dma.yres; |
| 1090 | fn = lcd_dma.xres; |
| 1091 | break; |
| 1092 | case 180: |
| 1093 | if (!lcd_dma.mirror) { |
| 1094 | top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); |
| 1095 | bottom = PIXADDR(0, 0); |
| 1096 | ei = PIXSTEP(1, 0, 0, 0); |
| 1097 | fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0); |
| 1098 | } else { |
| 1099 | top = PIXADDR(0, lcd_dma.yres - 1); |
| 1100 | bottom = PIXADDR(lcd_dma.xres - 1, 0); |
| 1101 | ei = PIXSTEP(0, 0, 1, 0); |
| 1102 | fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0); |
| 1103 | } |
| 1104 | en = lcd_dma.xres; |
| 1105 | fn = lcd_dma.yres; |
| 1106 | break; |
| 1107 | case 270: |
| 1108 | if (!lcd_dma.mirror) { |
| 1109 | top = PIXADDR(lcd_dma.xres - 1, 0); |
| 1110 | bottom = PIXADDR(0, lcd_dma.yres - 1); |
| 1111 | ei = PIXSTEP(0, 0, 0, 1); |
| 1112 | fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0); |
| 1113 | } else { |
| 1114 | top = PIXADDR(0, 0); |
| 1115 | bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); |
| 1116 | ei = PIXSTEP(0, 0, 0, 1); |
| 1117 | fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0); |
| 1118 | } |
| 1119 | en = lcd_dma.yres; |
| 1120 | fn = lcd_dma.xres; |
| 1121 | break; |
| 1122 | default: |
| 1123 | BUG(); |
| 1124 | return; /* Suppress warning about uninitialized vars */ |
| 1125 | } |
| 1126 | |
| 1127 | if (omap_dma_in_1510_mode()) { |
| 1128 | omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); |
| 1129 | omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); |
| 1130 | omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); |
| 1131 | omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L); |
| 1132 | |
| 1133 | return; |
| 1134 | } |
| 1135 | |
| 1136 | /* 1610 regs */ |
| 1137 | omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U); |
| 1138 | omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L); |
| 1139 | omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U); |
| 1140 | omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L); |
| 1141 | |
| 1142 | omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1); |
| 1143 | omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1); |
| 1144 | |
| 1145 | w = omap_readw(OMAP1610_DMA_LCD_CSDP); |
| 1146 | w &= ~0x03; |
| 1147 | w |= lcd_dma.data_type; |
| 1148 | omap_writew(w, OMAP1610_DMA_LCD_CSDP); |
| 1149 | |
| 1150 | w = omap_readw(OMAP1610_DMA_LCD_CTRL); |
| 1151 | /* Always set the source port as SDRAM for now */ |
| 1152 | w &= ~(0x03 << 6); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1153 | if (lcd_dma.callback != NULL) |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1154 | w |= 1 << 1; /* Block interrupt enable */ |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1155 | else |
| 1156 | w &= ~(1 << 1); |
| 1157 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); |
| 1158 | |
| 1159 | if (!(lcd_dma.rotate || lcd_dma.mirror || |
| 1160 | lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale)) |
| 1161 | return; |
| 1162 | |
| 1163 | w = omap_readw(OMAP1610_DMA_LCD_CCR); |
| 1164 | /* Set the double-indexed addressing mode */ |
| 1165 | w |= (0x03 << 12); |
| 1166 | omap_writew(w, OMAP1610_DMA_LCD_CCR); |
| 1167 | |
| 1168 | omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1); |
| 1169 | omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U); |
| 1170 | omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L); |
| 1171 | } |
| 1172 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1173 | static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id, |
| 1174 | struct pt_regs *regs) |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1175 | { |
| 1176 | u16 w; |
| 1177 | |
| 1178 | w = omap_readw(OMAP1610_DMA_LCD_CTRL); |
| 1179 | if (unlikely(!(w & (1 << 3)))) { |
| 1180 | printk(KERN_WARNING "Spurious LCD DMA IRQ\n"); |
| 1181 | return IRQ_NONE; |
| 1182 | } |
| 1183 | /* Ack the IRQ */ |
| 1184 | w |= (1 << 3); |
| 1185 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); |
| 1186 | lcd_dma.active = 0; |
| 1187 | if (lcd_dma.callback != NULL) |
| 1188 | lcd_dma.callback(w, lcd_dma.cb_data); |
| 1189 | |
| 1190 | return IRQ_HANDLED; |
| 1191 | } |
| 1192 | |
| 1193 | int omap_request_lcd_dma(void (* callback)(u16 status, void *data), |
| 1194 | void *data) |
| 1195 | { |
| 1196 | spin_lock_irq(&lcd_dma.lock); |
| 1197 | if (lcd_dma.reserved) { |
| 1198 | spin_unlock_irq(&lcd_dma.lock); |
| 1199 | printk(KERN_ERR "LCD DMA channel already reserved\n"); |
| 1200 | BUG(); |
| 1201 | return -EBUSY; |
| 1202 | } |
| 1203 | lcd_dma.reserved = 1; |
| 1204 | spin_unlock_irq(&lcd_dma.lock); |
| 1205 | lcd_dma.callback = callback; |
| 1206 | lcd_dma.cb_data = data; |
| 1207 | lcd_dma.active = 0; |
| 1208 | lcd_dma.single_transfer = 0; |
| 1209 | lcd_dma.rotate = 0; |
| 1210 | lcd_dma.vxres = 0; |
| 1211 | lcd_dma.mirror = 0; |
| 1212 | lcd_dma.xscale = 0; |
| 1213 | lcd_dma.yscale = 0; |
| 1214 | lcd_dma.ext_ctrl = 0; |
| 1215 | lcd_dma.src_port = 0; |
| 1216 | |
| 1217 | return 0; |
| 1218 | } |
| 1219 | |
| 1220 | void omap_free_lcd_dma(void) |
| 1221 | { |
| 1222 | spin_lock(&lcd_dma.lock); |
| 1223 | if (!lcd_dma.reserved) { |
| 1224 | spin_unlock(&lcd_dma.lock); |
| 1225 | printk(KERN_ERR "LCD DMA is not reserved\n"); |
| 1226 | BUG(); |
| 1227 | return; |
| 1228 | } |
| 1229 | if (!enable_1510_mode) |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1230 | omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, |
| 1231 | OMAP1610_DMA_LCD_CCR); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1232 | lcd_dma.reserved = 0; |
| 1233 | spin_unlock(&lcd_dma.lock); |
| 1234 | } |
| 1235 | |
| 1236 | void omap_enable_lcd_dma(void) |
| 1237 | { |
| 1238 | u16 w; |
| 1239 | |
| 1240 | /* Set the Enable bit only if an external controller is |
| 1241 | * connected. Otherwise the OMAP internal controller will |
| 1242 | * start the transfer when it gets enabled. |
| 1243 | */ |
| 1244 | if (enable_1510_mode || !lcd_dma.ext_ctrl) |
| 1245 | return; |
Tony Lindgren | bb13b5f | 2005-07-10 19:58:18 +0100 | [diff] [blame] | 1246 | |
| 1247 | w = omap_readw(OMAP1610_DMA_LCD_CTRL); |
| 1248 | w |= 1 << 8; |
| 1249 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); |
| 1250 | |
Tony Lindgren | 92105bb | 2005-09-07 17:20:26 +0100 | [diff] [blame] | 1251 | lcd_dma.active = 1; |
| 1252 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1253 | w = omap_readw(OMAP1610_DMA_LCD_CCR); |
| 1254 | w |= 1 << 7; |
| 1255 | omap_writew(w, OMAP1610_DMA_LCD_CCR); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1256 | } |
| 1257 | |
| 1258 | void omap_setup_lcd_dma(void) |
| 1259 | { |
| 1260 | BUG_ON(lcd_dma.active); |
| 1261 | if (!enable_1510_mode) { |
| 1262 | /* Set some reasonable defaults */ |
| 1263 | omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); |
| 1264 | omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); |
| 1265 | omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); |
| 1266 | } |
| 1267 | set_b1_regs(); |
| 1268 | if (!enable_1510_mode) { |
| 1269 | u16 w; |
| 1270 | |
| 1271 | w = omap_readw(OMAP1610_DMA_LCD_CCR); |
| 1272 | /* If DMA was already active set the end_prog bit to have |
| 1273 | * the programmed register set loaded into the active |
| 1274 | * register set. |
| 1275 | */ |
| 1276 | w |= 1 << 11; /* End_prog */ |
| 1277 | if (!lcd_dma.single_transfer) |
| 1278 | w |= (3 << 8); /* Auto_init, repeat */ |
| 1279 | omap_writew(w, OMAP1610_DMA_LCD_CCR); |
| 1280 | } |
| 1281 | } |
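/*
 * Hedged sketch of how an OMAP1 framebuffer driver might drive the LCD DMA
 * helpers above (my_lcd_cb, my_data, fb_phys and the 240x320 resolution are
 * made-up examples):
 *
 *     if (omap_request_lcd_dma(my_lcd_cb, my_data))
 *             return -EBUSY;
 *     omap_set_lcd_dma_single_transfer(0);
 *     omap_set_lcd_dma_ext_controller(1);
 *     omap_set_lcd_dma_b1(fb_phys, 240, 320, OMAP_DMA_DATA_TYPE_S16);
 *     omap_setup_lcd_dma();
 *     omap_enable_lcd_dma();
 *     ...
 *     omap_stop_lcd_dma();
 *     omap_free_lcd_dma();
 */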
| 1282 | |
| 1283 | void omap_stop_lcd_dma(void) |
| 1284 | { |
Tony Lindgren | bb13b5f | 2005-07-10 19:58:18 +0100 | [diff] [blame] | 1285 | u16 w; |
| 1286 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1287 | lcd_dma.active = 0; |
Tony Lindgren | bb13b5f | 2005-07-10 19:58:18 +0100 | [diff] [blame] | 1288 | if (enable_1510_mode || !lcd_dma.ext_ctrl) |
| 1289 | return; |
| 1290 | |
| 1291 | w = omap_readw(OMAP1610_DMA_LCD_CCR); |
| 1292 | w &= ~(1 << 7); |
| 1293 | omap_writew(w, OMAP1610_DMA_LCD_CCR); |
| 1294 | |
| 1295 | w = omap_readw(OMAP1610_DMA_LCD_CTRL); |
| 1296 | w &= ~(1 << 8); |
| 1297 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1298 | } |
| 1299 | |
Tony Lindgren | 0dc5e77 | 2006-04-02 17:46:26 +0100 | [diff] [blame] | 1300 | int omap_lcd_dma_ext_running(void) |
| 1301 | { |
| 1302 | return lcd_dma.ext_ctrl && lcd_dma.active; |
| 1303 | } |
| 1304 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1305 | /*----------------------------------------------------------------------------*/ |
Tony Lindgren | bb13b5f | 2005-07-10 19:58:18 +0100 | [diff] [blame] | 1306 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1307 | static int __init omap_init_dma(void) |
| 1308 | { |
| 1309 | int ch, r; |
| 1310 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1311 | if (cpu_is_omap15xx()) { |
| 1312 | printk(KERN_INFO "DMA support for OMAP15xx initialized\n"); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1313 | dma_chan_count = 9; |
| 1314 | enable_1510_mode = 1; |
| 1315 | } else if (cpu_is_omap16xx() || cpu_is_omap730()) { |
| 1316 | printk(KERN_INFO "OMAP DMA hardware version %d\n", |
| 1317 | omap_readw(OMAP_DMA_HW_ID)); |
| 1318 | printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1319 | (omap_readw(OMAP_DMA_CAPS_0_U) << 16) | |
| 1320 | omap_readw(OMAP_DMA_CAPS_0_L), |
| 1321 | (omap_readw(OMAP_DMA_CAPS_1_U) << 16) | |
| 1322 | omap_readw(OMAP_DMA_CAPS_1_L), |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1323 | omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3), |
| 1324 | omap_readw(OMAP_DMA_CAPS_4)); |
| 1325 | if (!enable_1510_mode) { |
| 1326 | u16 w; |
| 1327 | |
| 1328 | /* Disable OMAP 3.0/3.1 compatibility mode. */ |
| 1329 | w = omap_readw(OMAP_DMA_GSCR); |
| 1330 | w |= 1 << 3; |
| 1331 | omap_writew(w, OMAP_DMA_GSCR); |
| 1332 | dma_chan_count = 16; |
| 1333 | } else |
| 1334 | dma_chan_count = 9; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1335 | } else if (cpu_is_omap24xx()) { |
| 1336 | u8 revision = omap_readb(OMAP_DMA4_REVISION); |
| 1337 | printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", |
| 1338 | revision >> 4, revision & 0xf); |
| 1339 | dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT; |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1340 | } else { |
| 1341 | dma_chan_count = 0; |
| 1342 | return 0; |
| 1343 | } |
| 1344 | |
| 1345 | memset(&lcd_dma, 0, sizeof(lcd_dma)); |
| 1346 | spin_lock_init(&lcd_dma.lock); |
| 1347 | spin_lock_init(&dma_chan_lock); |
| 1348 | memset(&dma_chan, 0, sizeof(dma_chan)); |
| 1349 | |
| 1350 | for (ch = 0; ch < dma_chan_count; ch++) { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1351 | omap_clear_dma(ch); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1352 | dma_chan[ch].dev_id = -1; |
| 1353 | dma_chan[ch].next_lch = -1; |
| 1354 | |
| 1355 | if (ch >= 6 && enable_1510_mode) |
| 1356 | continue; |
| 1357 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1358 | if (cpu_class_is_omap1()) { |
| 1359 | /* request_irq() doesn't like dev_id (i.e. ch) being |
| 1360 | * zero, so we have to kludge around this. */ |
| 1361 | r = request_irq(omap1_dma_irq[ch], |
| 1362 | omap1_dma_irq_handler, 0, "DMA", |
| 1363 | (void *) (ch + 1)); |
| 1364 | if (r != 0) { |
| 1365 | int i; |
| 1366 | |
| 1367 | printk(KERN_ERR "unable to request IRQ %d " |
| 1368 | "for DMA (error %d)\n", |
| 1369 | omap1_dma_irq[ch], r); |
| 1370 | for (i = 0; i < ch; i++) |
| 1371 | free_irq(omap1_dma_irq[i], |
| 1372 | (void *) (i + 1)); |
| 1373 | return r; |
| 1374 | } |
| 1375 | } |
| 1376 | } |
| 1377 | |
| 1378 | if (cpu_is_omap24xx()) |
| 1379 | setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq); |
| 1380 | |
| 1381 | /* FIXME: Update LCD DMA to work on 24xx */ |
| 1382 | if (cpu_class_is_omap1()) { |
| 1383 | r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, |
| 1384 | "LCD DMA", NULL); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1385 | if (r != 0) { |
| 1386 | int i; |
| 1387 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1388 | printk(KERN_ERR "unable to request IRQ for LCD DMA " |
| 1389 | "(error %d)\n", r); |
| 1390 | for (i = 0; i < dma_chan_count; i++) |
| 1391 | free_irq(omap1_dma_irq[i], (void *) (i + 1)); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1392 | return r; |
| 1393 | } |
| 1394 | } |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1395 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1396 | return 0; |
| 1397 | } |
| 1398 | |
| 1399 | arch_initcall(omap_init_dma); |
| 1400 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1401 | EXPORT_SYMBOL(omap_get_dma_src_pos); |
| 1402 | EXPORT_SYMBOL(omap_get_dma_dst_pos); |
Tony Lindgren | 92105bb | 2005-09-07 17:20:26 +0100 | [diff] [blame] | 1403 | EXPORT_SYMBOL(omap_get_dma_src_addr_counter); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1404 | EXPORT_SYMBOL(omap_clear_dma); |
| 1405 | EXPORT_SYMBOL(omap_set_dma_priority); |
| 1406 | EXPORT_SYMBOL(omap_request_dma); |
| 1407 | EXPORT_SYMBOL(omap_free_dma); |
| 1408 | EXPORT_SYMBOL(omap_start_dma); |
| 1409 | EXPORT_SYMBOL(omap_stop_dma); |
| 1410 | EXPORT_SYMBOL(omap_enable_dma_irq); |
| 1411 | EXPORT_SYMBOL(omap_disable_dma_irq); |
| 1412 | |
| 1413 | EXPORT_SYMBOL(omap_set_dma_transfer_params); |
| 1414 | EXPORT_SYMBOL(omap_set_dma_color_mode); |
| 1415 | |
| 1416 | EXPORT_SYMBOL(omap_set_dma_src_params); |
| 1417 | EXPORT_SYMBOL(omap_set_dma_src_index); |
| 1418 | EXPORT_SYMBOL(omap_set_dma_src_data_pack); |
| 1419 | EXPORT_SYMBOL(omap_set_dma_src_burst_mode); |
| 1420 | |
| 1421 | EXPORT_SYMBOL(omap_set_dma_dest_params); |
| 1422 | EXPORT_SYMBOL(omap_set_dma_dest_index); |
| 1423 | EXPORT_SYMBOL(omap_set_dma_dest_data_pack); |
| 1424 | EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); |
| 1425 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 1426 | EXPORT_SYMBOL(omap_set_dma_params); |
| 1427 | |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1428 | EXPORT_SYMBOL(omap_dma_link_lch); |
| 1429 | EXPORT_SYMBOL(omap_dma_unlink_lch); |
| 1430 | |
| 1431 | EXPORT_SYMBOL(omap_request_lcd_dma); |
| 1432 | EXPORT_SYMBOL(omap_free_lcd_dma); |
| 1433 | EXPORT_SYMBOL(omap_enable_lcd_dma); |
| 1434 | EXPORT_SYMBOL(omap_setup_lcd_dma); |
| 1435 | EXPORT_SYMBOL(omap_stop_lcd_dma); |
Tony Lindgren | 0dc5e77 | 2006-04-02 17:46:26 +0100 | [diff] [blame] | 1436 | EXPORT_SYMBOL(omap_lcd_dma_ext_running); |
Tony Lindgren | 5e1c5ff | 2005-07-10 19:58:15 +0100 | [diff] [blame] | 1437 | EXPORT_SYMBOL(omap_set_lcd_dma_b1); |
| 1438 | EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); |
| 1439 | EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller); |
| 1440 | EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); |
| 1441 | EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); |
| 1442 | EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale); |
| 1443 | EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); |
| 1444 | |