#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H


#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>

#include <asm/of_device.h>

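/* Every helper below first checks that the device really sits on the
 * PCI or EBus bus and then forwards to the corresponding pci_* call
 * (or to pci_iommu_ops for coherent allocations).
 */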
static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

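/* Coherent allocations call pci_iommu_ops->alloc_consistent directly
 * (rather than pci_alloc_consistent(), which takes no gfp argument)
 * so the caller's allocation flags reach the IOMMU code.
 */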
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type &&
	       dev->bus != &ebus_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}

#else

struct device;

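/* Stubs for the !CONFIG_PCI case: there is no implementation to
 * forward to, so any caller is a bug.
 */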
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}

#endif /* PCI */


/* Now for the API extensions over the pci_ one */

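/* sparc64 DMA is cache-coherent, so the "noncoherent" helpers can
 * simply alias their coherent counterparts and dma_is_consistent()
 * is always true.
 */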
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* could define this in terms of the dma_cache ... operations,
	 * but if you get this on a platform, you should convert the platform
	 * to using the generic device DMA API */
	BUG();
}

#endif /* _ASM_SPARC64_DMA_MAPPING_H */