/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc, as used by
 * the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
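
/*
 * Illustrative sketch only (not part of this header's API): on a
 * non-coherent platform a driver can either allocate from the uncached
 * pool with __dma_alloc_coherent(), or allocate normally and bracket
 * device accesses with __dma_sync().  The helpers below are hypothetical
 * and assume GFP_KERNEL and DMA_TO_DEVICE from the generic kernel headers.
 */
#if 0	/* example only, never compiled */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	/* Uncached, DMA-consistent allocation for a descriptor ring. */
	return __dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void example_push_buffer(void *buf, size_t len)
{
	/* Normally allocated buffer: flush CPU writes out of the cache
	 * before the device reads the buffer via DMA.
	 */
	__dma_sync(buf, len, DMA_TO_DEVICE);
}
#endif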

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
#endif
};

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
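
/*
 * Illustrative sketch only (not part of this header's API): platform or
 * bus setup code selects one of the available dma_mapping_ops and attaches
 * it to each device it creates, after which the generic dma_* wrappers
 * below dispatch through dev->archdata.dma_ops.  The function name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_setup_dma(struct device *dev)
{
	/* Use the generic "direct" (non-IOMMU) operations for this device. */
	set_dma_ops(dev, &dma_direct_ops);
}
#endif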

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
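
/*
 * Illustrative sketch only (not part of this header's API): the usual
 * driver pattern is to request a wide DMA mask and fall back to 32 bits
 * if the bus ops refuse it.  Assumes DMA_BIT_MASK() from
 * <linux/dma-mapping.h>; the function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_set_dma_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	/* 64-bit addressing rejected; try the 32-bit default instead. */
	return dma_set_mask(dev, DMA_BIT_MASK(32));
}
#endif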

/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}
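
/*
 * Illustrative sketch only (not part of this header's API): the _attrs
 * variants let a driver pass DMA attributes down to the bus ops.  This
 * hypothetical helper assumes init_dma_attrs(), dma_set_attr() and
 * DMA_ATTR_WEAK_ORDERING from <linux/dma-attrs.h>.
 */
#if 0	/* example only, never compiled */
static dma_addr_t example_map_weakly_ordered(struct device *dev, void *buf,
					     size_t len)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	/* Map a receive buffer, asking for weakly ordered DMA if the
	 * underlying ops honour the attribute.
	 */
	return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
}
#endif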

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
					   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle,
					      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
					   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
					      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif
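
/*
 * Illustrative sketch only (not part of this header's API): for streaming
 * mappings on platforms that need the sync ops, ownership of the buffer is
 * handed back and forth with dma_sync_single_for_cpu()/_for_device()
 * between device transfers.  The function name and buffer layout are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_poll_status(struct device *dev, dma_addr_t handle,
				void *buf, size_t len)
{
	/* Make device-written data visible to the CPU before reading it. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect buf here ... */
	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif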

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
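
/*
 * Illustrative sketch only (not part of this header's API): every streaming
 * mapping should be checked with dma_mapping_error() before the handle is
 * handed to the hardware.  The function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_map_tx_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -EIO;	/* mapping failed, e.g. IOMMU space exhausted */
	return 0;
}
#endif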

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
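
/*
 * Illustrative sketch only (not part of this header's API): buffers shared
 * with non-coherent DMA devices are commonly padded out to the alignment
 * reported above, so unrelated data never shares a cache line with the DMA
 * region.  Assumes ALIGN() from <linux/kernel.h>.
 */
#if 0	/* example only, never compiled */
static size_t example_dma_buffer_size(size_t len)
{
	return ALIGN(len, dma_get_cache_alignment());
}
#endif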

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */