#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

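/*
 * Usage sketch (hypothetical driver code, not part of this API):
 * allocate a coherent descriptor ring, hand the bus address to the
 * device, and release the ring when done.  RING_SIZE is an assumed
 * driver-defined constant.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */
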
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either dma_unmap_single() or dma_sync_single_for_cpu() is
 * performed.
 */
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);

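/*
 * Usage sketch (hypothetical driver code): map a buffer for a device
 * read, then unmap it once the transfer has completed.  buf and len
 * are assumed driver variables.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(addr))
 *		return -ENOMEM;
 *	... program addr into the device and start the transfer ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
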
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous dma_map_single() call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single() interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);

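/*
 * Usage sketch (hypothetical driver code): map a scatterlist and walk
 * the address/length pairs that were actually used.  sglist, nents and
 * program_device_entry() are assumed driver-specific names.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		program_device_entry(sg_dma_address(&sglist[i]),
 *				     sg_dma_length(&sglist[i]));
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg().
 */
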
/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

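/*
 * Usage sketch (hypothetical driver code): map one page for a device
 * read, then unmap it after the transfer.  page is an assumed
 * struct page pointer held by the driver.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	... start the transfer ...
 *	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
 */
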
/*
 * Streaming sync operations.  Handing a buffer back to the CPU needs
 * no work here; before handing it back to the device, the CPU's write
 * buffers are flushed so the device sees the data.
 */
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}

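/*
 * Usage sketch (hypothetical driver code): let the CPU inspect a
 * buffer that is still mapped, then give ownership back to the device.
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 */
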
static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}

/*
 * Mappings never fail on this architecture, so there is no error state
 * to report.
 */
static inline
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA provides.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

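/*
 * Usage sketch (hypothetical probe code): declare the device's
 * addressing capability before making any mapping.  DMA_32BIT_MASK is
 * the constant from <linux/dma-mapping.h>.
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */
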
static inline
int dma_get_cache_alignment(void)
{
	return 1 << L1_CACHE_SHIFT;
}

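/*
 * Usage sketch: round a buffer size up to a safe DMA boundary, using
 * the ALIGN() helper from <linux/kernel.h>.
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 */
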

#define dma_is_consistent(d, h)	(1)

/*
 * Make CPU-side writes to memory obtained from dma_alloc_noncoherent()
 * visible to the device.
 */
static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}

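/*
 * Usage sketch (hypothetical driver code): after the CPU writes into
 * memory from dma_alloc_noncoherent(), push the data out before the
 * device reads it.  vaddr, data and len are assumed driver variables.
 *
 *	memcpy(vaddr, data, len);
 *	dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
 */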

#endif  /* _ASM_DMA_MAPPING_H */