#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/device.h>
#include <asm/scatterlist.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	int direction);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	/* Fix when needed. I really don't know of any limitations */
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
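
/*
 * Illustrative usage sketch (not part of the original header; "mydev" is
 * a hypothetical struct device pointer).  A device that can only drive
 * 24 address bits would restrict its DMA mask during probe and bail out
 * if the platform cannot honour it:
 *
 *	if (dma_set_mask(mydev, 0x00ffffff))
 *		return -EIO;
 */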

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, returns the
 * CPU-viewed address, and sets @handle to the device-viewed
 * address.
 */
extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_coherent(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t handle);
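
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "ring" and "ring_dma" are hypothetical).  A driver would typically
 * allocate a descriptor ring once at probe time and free it on removal:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(mydev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(mydev, PAGE_SIZE, ring, ring_dma);
 */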

/**
 * dma_alloc_writecombine - allocate write-combining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, returns the
 * CPU-viewed address, and sets @handle to the device-viewed
 * address.
 */
extern void *dma_alloc_writecombine(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_writecombine
 * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
 * @handle: device-view address returned from dma_alloc_writecombine
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_writecombine().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_writecombine(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t handle);
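
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "fb_size" and "fb_dma" are hypothetical).  Write-combining memory is
 * typically used for frame buffers, where buffered writes are acceptable:
 *
 *	void *fb = dma_alloc_writecombine(mydev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(mydev, fb_size, fb, fb_dma);
 */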

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	dma_cache_sync(dev, cpu_addr, size, direction);
	return virt_to_bus(cpu_addr);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer that was mapped
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{

}
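
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "buf" and "len" are hypothetical).  A streaming mapping for a
 * device-to-memory transfer might look like:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(mydev, buf, len, DMA_FROM_DEVICE);
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(mydev, dma, len, DMA_FROM_DEVICE);
 *
 * The CPU must not touch buf between map and unmap unless it first calls
 * dma_sync_single_for_cpu().
 */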

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	return dma_map_single(dev, page_address(page) + offset,
			      size, direction);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer that was mapped
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}
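
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "page", "off" and "len" are hypothetical).  Mapping part of a page,
 * e.g. a network fragment, follows the same pattern as dma_map_single():
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(mydev, page, off, len, DMA_TO_DEVICE);
 *	... start the transfer and wait for completion ...
 *	dma_unmap_page(mydev, dma, len, DMA_TO_DEVICE);
 */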

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * dma_map_single() interface above.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,len}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements
 *       (for example via virtual mapping capabilities).
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		char *virt;

		sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
		virt = page_address(sg[i].page) + sg[i].offset;
		dma_cache_sync(dev, virt, sg[i].length, direction);
	}

	return nents;
}
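
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "sgl", "nents" and setup_desc() are hypothetical).  After mapping, the
 * driver programs the device with the per-element bus addresses and
 * lengths obtained via sg_dma_address() and sg_dma_len():
 *
 *	int i, count;
 *
 *	count = dma_map_sg(mydev, sgl, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		setup_desc(i, sg_dma_address(&sgl[i]), sg_dma_len(&sgl[i]));
 *	... start the transfer and wait for completion ...
 *	dma_unmap_sg(mydev, sgl, nents, DMA_TO_DEVICE);
 */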

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{

}

/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  The next time you give
 * the DMA address back to the device, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}
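
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "dma" and "len" are hypothetical).  A driver that inspects a mapped
 * buffer between transfers, without unmapping it, brackets the CPU
 * access with a sync pair:
 *
 *	dma_sync_single_for_cpu(mydev, dma, len, DMA_FROM_DEVICE);
 *	... examine the data through the CPU mapping ...
 *	dma_sync_single_for_device(mydev, dma, len, DMA_FROM_DEVICE);
 */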

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, direction);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, direction);
	}
}
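
/*
 * Illustrative usage sketch (not part of the original header; "mydev",
 * "sgl" and "nents" are hypothetical).  The scatter-gather sync calls
 * follow the same ownership rules, one call covering the whole list:
 *
 *	dma_sync_sg_for_cpu(mydev, sgl, nents, DMA_FROM_DEVICE);
 *	... walk the list and inspect the data ...
 *	dma_sync_sg_for_device(mydev, sgl, nents, DMA_FROM_DEVICE);
 */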

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return 1;
}

static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.dcache.linesz;
}
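
/*
 * Illustrative usage sketch (not part of the original header; "len" is
 * hypothetical).  Buffers used for streaming DMA should not share cache
 * lines with unrelated data, so a driver may round sizes up to the
 * reported alignment:
 *
 *	len = ALIGN(len, dma_get_cache_alignment());
 */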

#endif /* __ASM_AVR32_DMA_MAPPING_H */