/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>

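/*
 * Allocate a coherent DMA buffer.  Pages are allocated with alloc_pages(),
 * split into individual pages, and remapped into a virtually contiguous,
 * cache-inhibited region with vmap(); '040/'060 use the serialized
 * non-cacheable page mode, '020/'030 the plain non-cacheable one.  Pages
 * that the power-of-two allocation rounds up beyond the requested size are
 * freed again below.  *handle receives the physical address of the buffer.
 * Typical usage from a driver:
 *
 *	buf = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
 */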
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t flag)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu, %x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	/* return the pages beyond the requested size to the allocator */
	for (; i < order; i++)
		__free_page(page + i);
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);

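/* Free a buffer obtained from dma_alloc_coherent() above. */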
void dma_free_coherent(struct device *dev, size_t size,
		       void *addr, dma_addr_t handle)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);

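/*
 * Make a buffer consistent for the device before DMA: for DMA_TO_DEVICE
 * push dirty cache lines back to memory, for DMA_FROM_DEVICE invalidate
 * the cache lines covering the buffer so stale data is not read back
 * after the transfer.
 */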
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_ERR "dma_sync_single_for_device: "
			       "unsupported dir %u\n", dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);

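/* Sync each scatterlist entry for the device. */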
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++)
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

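/*
 * m68k has no IOMMU, so mapping a buffer for DMA only needs a cache sync;
 * the DMA handle is simply the bus address of the buffer.
 */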
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_bus(addr);

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);

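/* Map a single page for DMA; offset is relative to the start of the page. */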
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);

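/*
 * Map a scatterlist: fill in the physical address of each segment and
 * sync it for the device.  No merging is done, so all nents entries
 * remain valid.
 */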
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
	return nents;
}
EXPORT_SYMBOL(dma_map_sg);