#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

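/*
 * Per-backend DMA operations.  Each DMA implementation (nommu, swiotlb,
 * hardware IOMMUs) fills in one of these tables; the inline wrappers
 * below dispatch through it.
 */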
struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*		(*alloc_coherent)(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
					 void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				      size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
					size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
					       dma_addr_t dma_handle, size_t size,
					       int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
						  dma_addr_t dma_handle, size_t size,
						  int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
							dma_addr_t dma_handle,
							unsigned long offset,
							size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
					   struct scatterlist *sg, int nelems,
					   int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
					      struct scatterlist *sg, int nelems,
					      int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				  int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				    struct scatterlist *sg, int nents,
				    int direction);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev, dma_addr_t dma_handle,
				      size_t size, enum dma_data_direction dir,
				      struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;

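/*
 * On 64-bit, a device may carry its own dma_mapping_ops in dev->archdata;
 * otherwise (and always on 32-bit) the global dma_ops table is used.
 */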
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the same behaviour as before the ->mapping_error hook existed:
 * backends without the hook signal failure via bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

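/*
 * Streaming mappings.  dma_map_single() hands a kernel-virtual buffer to
 * the backend as a physical address; the returned handle must be checked
 * with dma_mapping_error() before use.
 */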
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

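/*
 * Typical driver use of the single-buffer API, as a minimal sketch
 * ("buf" and "len" are illustrative names, not part of this header):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the device transfer using "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
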
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}

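/*
 * Ownership transfers for streaming mappings: the sync_*_for_cpu
 * wrappers hand a buffer back to the CPU, the sync_*_for_device ones
 * return it to the device.  Each wrapper finishes with
 * flush_write_buffers() so posted CPU writes are drained.
 */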
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

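/*
 * Page-based mappings are implemented on top of ->map_single() via
 * page_to_phys(), so no kernel virtual address is required.
 */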
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache line size on all x86
	 * CPUs, so return the maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

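/*
 * Helpers for dma_alloc_coherent(): pick the mask to honour (the
 * device's coherent_dma_mask, or a default derived from the gfp flags)
 * and translate it into the matching GFP zone bits.
 */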
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

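/*
 * dma_alloc_coherent() first tries any per-device coherent memory pool,
 * then substitutes x86_dma_fallback_dev for a NULL device, refuses
 * devices that cannot do DMA at all, and finally dispatches to the
 * backend's ->alloc_coherent() with sanitized gfp flags.
 */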
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

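/*
 * Minimal sketch of the coherent API from a driver's probe/remove path
 * ("ring" and "ring_dma" are illustrative names, not part of this
 * header):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program the device with "ring_dma", access "ring" from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
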
#endif