/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * By forcing NUMA node 0 for 32-bit masks we ensure that the
	 * high 32 bits of the resulting PA will be zero.  If the mask
	 * size is, e.g., 24, we may still not be able to guarantee a
	 * suitable memory address, in which case we will return NULL.
	 * But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		/* Free by kernel VA; "addr" is a physical address. */
		homecache_free_pages((unsigned long)page_address(pg), order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
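
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * would typically pair the two calls above as follows.  "dev",
 * "ring_size", and the error path are hypothetical.
 *
 *	dma_addr_t bus_addr;
 *	void *ring = dma_alloc_coherent(dev, ring_size, &bus_addr,
 *					GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	// ... hand bus_addr to the device, use "ring" from the CPU ...
 *	dma_free_coherent(dev, ring_size, ring, bus_addr);
 */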

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping; we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */


/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	struct page *page;
	dma_addr_t dma_addr;
	int thispage;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	dma_addr = __pa(ptr);

	/* We might have been handed a buffer that crosses a page boundary */
	while ((int)size > 0) {
		/* The amount to flush that's on this page */
		thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
		thispage = min((int)thispage, (int)size);
		/* Is this valid for any page we could be handed? */
		page = pfn_to_page(kaddr_to_pfn(ptr));
		homecache_flush_cache(page, 0);
		ptr += thispage;
		size -= thispage;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
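
/*
 * Usage sketch (illustrative only): a typical streaming transmit path
 * maps a buffer, lets the device DMA from it, then unmaps it.  "dev",
 * "skb_data", and "len" are hypothetical names.
 *
 *	dma_addr_t bus = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *	// ... point the device at "bus" and start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *	// only now may the CPU safely touch skb_data again
 */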

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		struct page *page;
		sg->dma_address = sg_phys(sg);
		page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
		homecache_flush_cache(page, 0);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
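
/*
 * Usage sketch (illustrative only): mapping a scatterlist.  Here
 * dma_map_sg() returns the nents it was given, but portable drivers
 * must still use the return value.  "dev", "sgl", "n", and
 * program_device() are hypothetical.
 *
 *	int count = dma_map_sg(dev, sgl, n, DMA_TO_DEVICE);
 *	struct scatterlist *s;
 *	int i;
 *	for_each_sg(sgl, s, count, i)
 *		program_device(sg_dma_address(s), sg_dma_len(s));
 *	// ... device performs the transfer ...
 *	dma_unmap_sg(dev, sgl, n, DMA_TO_DEVICE);
 */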

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
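
/*
 * Usage sketch (illustrative only): when a mapped buffer is reused
 * without remapping, the driver brackets each round of CPU access
 * with the sync calls.  As the bodies above show, only the
 * for_device direction does real work here (a cache flush); for_cpu
 * is a no-op.  "dev", "bus", and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	// ... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	// ... device may DMA into the buffer again ...
 */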

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);