/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

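/*
 * Allocate zeroed, cached pages for DMA and return their physical
 * address through *dma_handle.  Falls back to the DMA zone when the
 * device cannot address the full 32-bit range.
 */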
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

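/*
 * Allocate a "coherent" buffer: allocate cached pages as above, write
 * back and invalidate the corresponding cache lines, then return an
 * uncached alias of the buffer so CPU accesses bypass the cache.
 */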
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

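/*
 * Free a coherent buffer: translate the uncached alias back to the
 * cached kernel address before handing the pages to free_pages().
 */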
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

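/*
 * Perform the cache maintenance required for a streaming mapping:
 * write back dirty lines before the device reads (DMA_TO_DEVICE),
 * invalidate stale lines before the CPU reads what the device wrote
 * (DMA_FROM_DEVICE), or both for bidirectional transfers.
 */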
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

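/*
 * Map a single buffer for streaming DMA: flush it for the requested
 * direction and return its physical address as the bus address.
 */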
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;
	addr = dma_addr + PAGE_OFFSET;

	//__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

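/*
 * Map a scatterlist: sync the cache lines of each entry that has a
 * kernel virtual mapping and record its bus address in sg->dma_address.
 */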
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr) {
			__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
					  + sg->offset;
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

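/*
 * Map one page for streaming DMA.  The cache lines are written back
 * and invalidated regardless of direction before the bus address of
 * the page is returned.
 */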
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

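/*
 * Unmap a page: for transfers where the device may have written to
 * memory, flush the corresponding lines so the CPU sees the new data.
 */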
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

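/*
 * Unmap a scatterlist.  Nothing needs to be done for DMA_TO_DEVICE;
 * otherwise each entry with a kernel mapping is synced again.
 */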
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

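/*
 * The dma_sync_single_* helpers convert the bus address back to a
 * kernel virtual address through PAGE_OFFSET and perform the
 * direction-specific cache maintenance on the affected range.
 */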
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

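/*
 * The dma_sync_sg_* helpers walk the scatterlist and sync each entry
 * in full for the given direction.
 */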
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			   sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			   sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

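/*
 * Dual address cycle (DAC) helpers: the 64-bit bus address is derived
 * directly from the physical page address, and the sync routines simply
 * write back and invalidate the cache over the mapped range.
 */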
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */