#ifndef _ASM_POWERPC_DMA_H
#define _ASM_POWERPC_DMA_H
#ifdef __KERNEL__

/*
 * Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 * Changes for ppc sound by Christoph Nadig
 */

/*
 * Note: Adapted for PowerPC by Gary Thomas
 * Modified by Cort Dougan <cort@cs.nmt.edu>
 *
 * None of this really applies for Power Macintoshes.  There is
 * basically just enough here to get kernel/dma.c to compile.
 *
 * There may be some comments or restrictions made here which are
 * not valid for the PReP platform.  Take what you read
 * with a grain of salt.
 */

#include <asm/io.h>
#include <linux/spinlock.h>
#include <asm/system.h>

#ifndef MAX_DMA_CHANNELS
#define MAX_DMA_CHANNELS	8
#endif

/* The maximum address that we can perform a DMA transfer to on this platform */
/* Doesn't really apply... */
#define MAX_DMA_ADDRESS		(~0UL)

#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)

#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb

/*
 * NOTES about DMA transfers:
 *
 *  controller 1: channels 0-3, byte operations, ports 00-1F
 *  controller 2: channels 4-7, word operations, ports C0-DF
 *
 *  - ALL registers are 8 bits only, regardless of transfer size
 *  - channel 4 is not used - cascades 1 into 2.
 *  - channels 0-3 are byte - addresses/counts are for physical bytes
 *  - channels 5-7 are word - addresses/counts are for physical words
 *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
 *  - transfer count loaded to registers is 1 less than actual count
 *  - controller 2 offsets are all even (2x offsets for controller 1)
 *  - page registers for 5-7 don't use data bit 0, represent 128K pages
 *  - page registers for 0-3 use bit 0, represent 64K pages
 *
 * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
 * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
 * Note that addresses loaded into registers must be _physical_ addresses,
 * not logical addresses (which may differ if paging is active).
 *
 *  Address mapping for channels 0-3:
 *
 *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *   P7  ...  P0  A7 ... A0  A7 ... A0
 * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
 *
 *  Address mapping for channels 5-7:
 *
 *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
 *    |  ...  |   \   \   ... \  \  \  ... \  \
 *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
 *    |  ...  |     \   \   ... \  \  \  ... \
 *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
 * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
 *
 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
 * the hardware level, so odd-byte transfers aren't possible).
 *
 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
 * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
 *
 */
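
/*
 * Worked example (illustrative, not from the original notes): a buffer at
 * physical address 0x123456, 0x2000 bytes long, programmed on a word channel
 * (5-7), ends up in the registers as:
 *
 *   page register  = bits 23-16     = 0x12  (bit 0 ignored, 128K page)
 *   address MSB    = bits 16-9      = (0x123456 >> 9) & 0xff = 0x1A
 *   address LSB    = bits  8-1      = (0x123456 >> 1) & 0xff = 0x2B
 *   count register = word count - 1 = (0x2000 / 2) - 1 = 0x0FFF
 *
 * which is exactly what set_dma_addr()/set_dma_count() below write for
 * channels 5-7; the address and length values are made up for the example.
 */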

/* see prep_setup_arch() for detailed information */
#if defined(CONFIG_SOUND_CS4232) && defined(CONFIG_PPC_PREP)
extern long ppc_cs4232_dma, ppc_cs4232_dma2;
#define SND_DMA1 ppc_cs4232_dma
#define SND_DMA2 ppc_cs4232_dma2
#else
#define SND_DMA1 -1
#define SND_DMA2 -1
#endif

/* 8237 DMA controllers */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09	/* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2	/* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */

#define DMA_ADDR_0		0x00	/* DMA address registers */
#define DMA_ADDR_1		0x02
#define DMA_ADDR_2		0x04
#define DMA_ADDR_3		0x06
#define DMA_ADDR_4		0xC0
#define DMA_ADDR_5		0xC4
#define DMA_ADDR_6		0xC8
#define DMA_ADDR_7		0xCC

#define DMA_CNT_0		0x01	/* DMA count registers */
#define DMA_CNT_1		0x03
#define DMA_CNT_2		0x05
#define DMA_CNT_3		0x07
#define DMA_CNT_4		0xC2
#define DMA_CNT_5		0xC6
#define DMA_CNT_6		0xCA
#define DMA_CNT_7		0xCE

#define DMA_LO_PAGE_0		0x87	/* DMA page registers */
#define DMA_LO_PAGE_1		0x83
#define DMA_LO_PAGE_2		0x81
#define DMA_LO_PAGE_3		0x82
#define DMA_LO_PAGE_5		0x8B
#define DMA_LO_PAGE_6		0x89
#define DMA_LO_PAGE_7		0x8A

#define DMA_HI_PAGE_0		0x487	/* DMA page registers */
#define DMA_HI_PAGE_1		0x483
#define DMA_HI_PAGE_2		0x481
#define DMA_HI_PAGE_3		0x482
#define DMA_HI_PAGE_5		0x48B
#define DMA_HI_PAGE_6		0x489
#define DMA_HI_PAGE_7		0x48A

#define DMA1_EXT_REG		0x40B
#define DMA2_EXT_REG		0x4D6

#ifndef __powerpc64__
/* in arch/ppc/kernel/setup.c -- Cort */
extern unsigned int DMA_MODE_WRITE;
extern unsigned int DMA_MODE_READ;
extern unsigned long ISA_DMA_THRESHOLD;
#else
#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
#endif

#define DMA_MODE_CASCADE	0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */

#define DMA_AUTOINIT		0x10
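
/*
 * Reading aid (based on the standard 8237 mode-register layout, not on
 * anything specific to this header): bits 1-0 of the mode byte select the
 * channel (filled in by set_dma_mode() below), bits 3-2 the transfer type
 * (01 moves data into memory, 10 moves data out of memory), bit 4 enables
 * autoinitialize, bit 5 selects address decrement, and bits 7-6 the mode
 * (01 = single transfer).  That is how DMA_MODE_READ (0x44) and
 * DMA_MODE_WRITE (0x48) decode to the comments next to them above.
 */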

extern spinlock_t dma_spin_lock;

static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
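
/*
 * All of the helpers below touch shared controller state (in particular the
 * pointer flip-flop), so callers are expected to bracket them with the lock,
 * roughly like this (illustrative sketch; "chan" is a hypothetical channel
 * number owned by the caller):
 *
 *	unsigned long flags = claim_dma_lock();
 *	clear_dma_ff(chan);
 *	... program the channel with the set_dma_* helpers below ...
 *	release_dma_lock(flags);
 */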

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	unsigned char ucDmaCmd = 0x00;

	if (dmanr != 4) {
		dma_outb(0, DMA2_MASK_REG);	/* This may not be enabled */
		dma_outb(ucDmaCmd, DMA2_CMD_REG);	/* Enable group */
	}
	if (dmanr <= 3) {
		dma_outb(dmanr, DMA1_MASK_REG);
		dma_outb(ucDmaCmd, DMA1_CMD_REG);	/* Enable group */
	} else {
		dma_outb(dmanr & 3, DMA2_MASK_REG);
	}
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr <= 3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}

/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 */
static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
{
	switch (dmanr) {
	case 0:
		dma_outb(pagenr, DMA_LO_PAGE_0);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_LO_PAGE_1);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_LO_PAGE_2);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_LO_PAGE_3);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
		break;
	case 5:
		if (SND_DMA1 == 5 || SND_DMA2 == 5)
			dma_outb(pagenr, DMA_LO_PAGE_5);
		else
			dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
		break;
	case 6:
		if (SND_DMA1 == 6 || SND_DMA2 == 6)
			dma_outb(pagenr, DMA_LO_PAGE_6);
		else
			dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
		break;
	case 7:
		if (SND_DMA1 == 7 || SND_DMA2 == 7)
			dma_outb(pagenr, DMA_LO_PAGE_7);
		else
			dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
		dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
		break;
	}
}

/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
{
	if (dmanr <= 3) {
		dma_outb(phys & 0xff,
			 ((dmanr & 3) << 1) + IO_DMA1_BASE);
		dma_outb((phys >> 8) & 0xff,
			 ((dmanr & 3) << 1) + IO_DMA1_BASE);
	} else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
		dma_outb(phys & 0xff,
			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((phys >> 8) & 0xff,
			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((dmanr & 3), DMA2_EXT_REG);
	} else {
		dma_outb((phys >> 1) & 0xff,
			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((phys >> 9) & 0xff,
			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
	}
	set_dma_page(dmanr, phys >> 16);
}


/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3) {
		dma_outb(count & 0xff,
			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
		dma_outb((count >> 8) & 0xff,
			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
	} else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
		dma_outb(count & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
		dma_outb((count >> 8) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
	} else {
		dma_outb((count >> 1) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
		dma_outb((count >> 9) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
	}
}
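
/*
 * Putting the helpers together, a driver would typically arm a device-to-
 * memory transfer roughly as follows (illustrative sketch only; "chan",
 * "buf_phys" and "len" are hypothetical and must obey the alignment and
 * boundary rules from the NOTES block above):
 *
 *	unsigned long flags;
 *
 *	flags = claim_dma_lock();
 *	disable_dma(chan);
 *	clear_dma_ff(chan);
 *	set_dma_mode(chan, DMA_MODE_READ);
 *	set_dma_addr(chan, buf_phys);	(a physical address, not virtual)
 *	set_dma_count(chan, len);	(bytes; even for channels 5-7)
 *	enable_dma(chan);
 *	release_dma_lock(flags);
 */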


/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr <= 3)
		? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
		: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;

	/* using short to get 16-bit wrap around */
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
		? count : (count << 1);
}
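
/*
 * After the device reports completion, a driver can check that the whole
 * buffer really moved, along these lines (illustrative sketch; "chan" is
 * hypothetical).  The lock keeps the two successive count-register reads
 * paired up with the flip-flop state:
 *
 *	flags = claim_dma_lock();
 *	disable_dma(chan);
 *	clear_dma_ff(chan);
 *	residue = get_dma_residue(chan);
 *	release_dma_lock(flags);
 *	if (residue != 0)
 *		... the transfer was short ...
 */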

/* These are in kernel/dma.c: */

/* reserve a DMA channel */
extern int request_dma(unsigned int dmanr, const char *device_id);
/* release it again */
extern void free_dma(unsigned int dmanr);
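
/*
 * Channel ownership is separate from channel programming: a driver claims a
 * channel once (typically at probe or open time) and only then uses the
 * inline helpers above.  An illustrative sketch, with a hypothetical channel
 * number and device name:
 *
 *	if (request_dma(chan, "mydevice"))
 *		return -EBUSY;
 *	...
 *	free_dma(chan);
 */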

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif	/* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */

#endif /* __KERNEL__ */
#endif	/* _ASM_POWERPC_DMA_H */