/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * By forcing NUMA node 0 for 32-bit masks we ensure that the
	 * high 32 bits of the resulting PA will be zero.  If the mask
	 * size is, e.g., 24, we may still not be able to guarantee a
	 * suitable memory address, in which case we will return NULL.
	 * But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		homecache_free_pages(addr, order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
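
/*
 * Hypothetical usage sketch (not part of the original file): a driver
 * allocating a descriptor ring that both the CPU and the device touch
 * without explicit sync calls might do something like:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	(use the ring, handing "ring_dma" to the device)
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * "pdev" and "RING_BYTES" are assumed names.  On this platform the
 * returned memory is simply uncached, so no flushing is ever needed
 * for it.
 */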

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping, we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */

/* Flush a PA range from cache page by page. */
static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));

	while ((ssize_t)size > 0) {
		/* Flush the page. */
		homecache_flush_cache(page++, 0);

		/* Figure out if we need to continue on the next page. */
		size -= bytesleft;
		bytesleft = PAGE_SIZE;
	}
}
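
/*
 * Worked example (illustrative only): with 4KB pages, a call with a
 * dma_addr whose low bits are 0xe00 and a size of 0x400 starts with
 * bytesleft = 0x200.  The loop flushes the first page, drops size to
 * 0x200, flushes the next page, and then stops once size goes negative,
 * having covered both pages the range touches.
 */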

/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	dma_addr_t dma_addr = __pa(ptr);

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	__dma_map_pa_range(dma_addr, size);

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
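
/*
 * Hypothetical usage sketch (not part of the original file): a driver
 * handing a buffer to the device for a single outbound transfer might
 * do something like:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	(start the device transfer using "dma" and wait for completion)
 *
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * "dev", "buf" and "len" are assumed names.  As the code above shows,
 * the map here cannot fail, but portable drivers would still check
 * dma_mapping_error() on the returned handle.
 */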

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_map_pa_range(sg->dma_address, sg->length);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
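
/*
 * Hypothetical usage sketch (not part of the original file): mapping a
 * populated struct sg_table "sgt" for an outbound transfer might look
 * like:
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *	(program sg_dma_address()/sg_dma_len() of each entry, then later)
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 * "dev" and "sgt" are assumed names.  As the code above shows, this
 * implementation only flushes caches and always returns nents.
 */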

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
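
/*
 * Hypothetical usage sketch (not part of the original file): if the CPU
 * rewrites a buffer that is still mapped, it must hand it back to the
 * device before the next device access:
 *
 *	memcpy(buf, new_data, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
 *
 * "buf", "new_data", "len", "dev" and "dma" are assumed names.  The
 * for_cpu direction is a no-op above, while the for_device direction
 * flushes every page the range covers.
 */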

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);