/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/signal.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/io.h>
#include "dma-sh.h"
/*
 * The SuperH DMAC supports a number of transmit sizes; they are listed
 * here with their respective values as they appear in the CHCR registers.
 *
 * Defaults to a 64-bit transfer size.
 */
enum {
	XMIT_SZ_64BIT,
	XMIT_SZ_8BIT,
	XMIT_SZ_16BIT,
	XMIT_SZ_32BIT,
	XMIT_SZ_256BIT,
};
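/*
 * For illustration: the enumerators take the values 0..4 in the order
 * above, matching the TS (transmit size) field encoding in CHCR; e.g. a
 * TS field of 3 selects XMIT_SZ_32BIT, while a cleared TS field yields
 * the 64-bit default.
 */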

/*
 * The DMA count is defined as the number of bytes to transfer.
 */
static unsigned int ts_shift[] = {
	[XMIT_SZ_64BIT]		= 3,
	[XMIT_SZ_8BIT]		= 0,
	[XMIT_SZ_16BIT]		= 1,
	[XMIT_SZ_32BIT]		= 2,
	[XMIT_SZ_256BIT]	= 5,
};
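/*
 * Worked example (illustrative, not from the original source): with a
 * 32-bit transmit size the shift is 2, so a 4096-byte request is
 * programmed into the transfer count register as 4096 >> 2 = 1024
 * transfer units of 4 bytes each.
 */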

static inline unsigned int get_dmte_irq(unsigned int chan)
{
	unsigned int irq;

	/*
	 * Normally we could just do DMTE0_IRQ + chan outright, though in the
	 * case of the 7751R, the DMTE IRQs for channels 4 and up start right
	 * above the SCIF.
	 */
	if (chan < 4) {
		irq = DMTE0_IRQ + chan;
	} else {
		irq = DMTE4_IRQ + chan - 4;
	}

	return irq;
}

/*
 * We determine the correct shift size based on the CHCR transmit size for
 * the given channel, since we know that it will take
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 */
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	u32 chcr = ctrl_inl(CHCR[chan->chan]);

	chcr >>= 4;

	return ts_shift[chcr & 0x0007];
}
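/*
 * Worked example (illustrative only): if the TS bits read back from CHCR
 * are 2, the lookup yields ts_shift[XMIT_SZ_16BIT] = 1, i.e. the byte
 * count is interpreted in 2-byte units.
 */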

/*
 * The transfer end interrupt must read the CHCR register to clear the
 * hardware interrupt active condition. Besides that, it needs to wake any
 * waiting process, which should handle setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
{
	struct dma_channel *chan = (struct dma_channel *)dev_id;
	u32 chcr;

	chcr = ctrl_inl(CHCR[chan->chan]);

	if (!(chcr & CHCR_TE))
		return IRQ_NONE;

	chcr &= ~(CHCR_IE | CHCR_DE);
	ctrl_outl(chcr, CHCR[chan->chan]);

	wake_up(&chan->wait_queue);

	return IRQ_HANDLED;
}
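/*
 * Consumer-side sketch (hypothetical, not part of this file): code waiting
 * on the channel could block until the handler above fires, roughly as
 *
 *	wait_event(chan->wait_queue,
 *		   ctrl_inl(CHCR[chan->chan]) & CHCR_TE);
 *
 * The actual wait logic is assumed to live with the callers of this driver.
 */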

static int sh_dmac_request_dma(struct dma_channel *chan)
{
	return request_irq(get_dmte_irq(chan->chan), dma_tei,
			   SA_INTERRUPT, "DMAC Transfer End", chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}

static void sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
	if (!chcr)
		chcr = RS_DUAL;

	ctrl_outl(chcr, CHCR[chan->chan]);

	chan->flags |= DMA_CONFIGURED;
}

static void sh_dmac_enable_dma(struct dma_channel *chan)
{
	int irq = get_dmte_irq(chan->chan);
	u32 chcr;

	chcr = ctrl_inl(CHCR[chan->chan]);
	chcr |= CHCR_DE | CHCR_IE;
	ctrl_outl(chcr, CHCR[chan->chan]);

	enable_irq(irq);
}

static void sh_dmac_disable_dma(struct dma_channel *chan)
{
	int irq = get_dmte_irq(chan->chan);
	u32 chcr;

	disable_irq(irq);

	chcr = ctrl_inl(CHCR[chan->chan]);
	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	ctrl_outl(chcr, CHCR[chan->chan]);
}

static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (!(chan->flags & DMA_CONFIGURED))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined; anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as it is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() && chan->chan == 2))
		ctrl_outl(chan->sar, SAR[chan->chan]);
	if (chan->dar || (mach_is_dreamcast() && chan->chan == 2))
		ctrl_outl(chan->dar, DAR[chan->chan]);

	ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);

	sh_dmac_enable_dma(chan);

	return 0;
}
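/*
 * Illustrative example (addresses are made up): in dual-address mode the
 * caller fills in both chan->sar and chan->dar, e.g. the physical address
 * of a memory buffer and a peripheral's data register, and both writes
 * above take place. In single-address mode only one of the two is set, so
 * the register for the unused side is deliberately left untouched, as the
 * note above explains.
 */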

static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE))
		return 0;

	return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan);
}
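/*
 * Residue example (illustrative only): if DMATCR still reads 100 and the
 * channel uses 32-bit units (shift of 2), the residue reported is
 * 100 << 2 = 400 bytes left to transfer.
 */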

#if defined(CONFIG_CPU_SH4)
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long dmaor = ctrl_inl(DMAOR);

	printk(KERN_WARNING "DMAE: DMAOR=%lx\n", dmaor);

	ctrl_outl(ctrl_inl(DMAOR) & ~DMAOR_NMIF, DMAOR);
	ctrl_outl(ctrl_inl(DMAOR) & ~DMAOR_AE, DMAOR);
	ctrl_outl(ctrl_inl(DMAOR) | DMAOR_DME, DMAOR);

	disable_irq(irq);

	return IRQ_HANDLED;
}
#endif

static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};

static struct dma_info sh_dmac_info = {
	.name		= "SuperH DMAC",
	.nr_channels	= 4,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};

static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i;

#ifdef CONFIG_CPU_SH4
	make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	i = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT,
			"DMAC Address Error", NULL);
	if (i < 0)
		return i;
#endif

	for (i = 0; i < info->nr_channels; i++) {
		int irq = get_dmte_irq(i);

		make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	}

	ctrl_outl(0x8000 | DMAOR_DME, DMAOR);

	return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
#ifdef CONFIG_CPU_SH4
	free_irq(DMAE_IRQ, NULL);
#endif
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_LICENSE("GPL");