/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>

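/*
 * Allocate a DMA-coherent buffer.  Grab 2^order pages, free the pages
 * beyond the requested size back to the allocator, then remap what is kept
 * through vmap() with a non-cacheable pgprot so CPU accesses are not served
 * from the cache.  The physical address of the buffer is returned through
 * *handle.
 *
 * Illustrative sketch only (not part of this file): a driver would
 * typically pair the allocation with dma_free_coherent(), e.g.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with "handle", access the data via "buf" ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */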
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t flag)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu, %x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	/* Free the tail pages that the power-of-two allocation over-committed. */
	for (; i < order; i++)
		__free_page(page + i);
	/* Map the remaining pages non-cacheable so the CPU view stays coherent. */
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);

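/*
 * Release a buffer obtained from dma_alloc_coherent() by tearing down its
 * non-cacheable virtual mapping.
 */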
void dma_free_coherent(struct device *dev, size_t size,
		       void *addr, dma_addr_t handle)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);

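/*
 * Make a streaming buffer visible to the device: write dirty cache lines
 * back to memory for DMA_TO_DEVICE, invalidate them for DMA_FROM_DEVICE so
 * the CPU will not read stale data afterwards.
 */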
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_ERR "dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);

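/*
 * Sync every entry of a scatterlist for the device, one segment at a time.
 */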
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++)
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

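/*
 * Map a kernel-virtual buffer for streaming DMA.  The handle comes straight
 * from virt_to_bus(); the only per-mapping work is the cache maintenance
 * done by dma_sync_single_for_device().
 *
 * Illustrative sketch only (not part of this file): a driver sending a
 * buffer to a device might do, for example,
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand "dma" to the hardware and start the transfer ...
 */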
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_bus(addr);

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);

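/*
 * Map a single page (plus offset) for streaming DMA; the handle is the
 * page's physical address plus the offset.
 */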
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);

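/*
 * Map a scatterlist for streaming DMA: fill in each entry's dma_address
 * from its physical address and sync it for the device.
 */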
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
	}
	return nents;
}
EXPORT_SYMBOL(dma_map_sg);