#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

/*
 * Per-backend DMA operations.  Each IOMMU implementation fills in the
 * hooks it supports; map_page and map_sg are mandatory, the remaining
 * hooks may be left NULL and are checked by the inline wrappers below.
 */
struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	dma_addr_t      (*map_page)(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    struct dma_attrs *attrs);
	void            (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
				      size_t size, enum dma_data_direction dir,
				      struct dma_attrs *attrs);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int             is_phys;
};

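/*
 * Purely illustrative sketch of how a backend hooks itself up; the
 * "example_*" names are hypothetical, not symbols in this tree.  Real
 * instances live in e.g. pci-nommu.c:
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.mapping_error	= example_mapping_error,
 *		.alloc_coherent	= dma_generic_alloc_coherent,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.map_page	= example_map_page,
 *		.unmap_page	= example_unmap_page,
 *		.is_phys	= 1,
 *	};
 *
 *	(then, at IOMMU init time)
 *	dma_ops = &example_dma_ops;
 */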
extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	/* 32-bit has a single global ops table. */
	return dma_ops;
#else
	/* 64-bit allows a per-device override via dev->archdata. */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

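/*
 * Illustrative sketch only (64-bit): a bus driver can route its devices
 * to a specific backend by setting the per-device table; "pdev" and
 * "example_dma_ops" are hypothetical:
 *
 *	pdev->dev.archdata.dma_ops = &example_dma_ops;
 *
 * Every wrapper below then dispatches through get_dma_ops(dev).
 */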
/*
 * Keep the old behaviour: if the backend has no mapping_error hook,
 * a mapping failed iff it returned bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* x86 DMA is cache-coherent, so all memory counts as consistent. */
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     direction, NULL);
}

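/*
 * Illustrative sketch only: the usual streaming-DMA pattern in a
 * hypothetical driver; "dev", "buf" and "len" are assumptions:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;		(never hand a failed handle to hw)
 *	(device DMAs from "handle" here)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */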
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, direction, NULL);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

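/*
 * Illustrative sketch only: map_sg() may coalesce entries, so program the
 * hardware with the *returned* count, but unmap with the original nents.
 * "dev", "sglist", "nents" and program_hw_entry() are assumptions:
 *
 *	int i, count;
 *	struct scatterlist *s;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_entry(i, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */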
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

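/*
 * Illustrative sketch only: the sync calls pass ownership of a streaming
 * mapping back and forth without remapping, e.g. for a reused RX buffer;
 * "dev", "handle", "buf" and "len" are assumptions:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(CPU may now safely read "buf")
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	(buffer handed back to the device for the next transfer)
 */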
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_page(dev, page, offset, size, direction, NULL);
}

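/*
 * Illustrative sketch only: dma_map_page() covers the highmem and
 * partial-page cases that dma_map_single() (which needs a kernel virtual
 * address) cannot; "dev", "page", "frag_off" and "frag_len" are
 * assumptions:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, frag_off, frag_len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, handle, frag_len, DMA_TO_DEVICE);
 */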
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/* x86 is cache-coherent; flushing the write buffers is enough. */
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return
	 * the maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

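/*
 * Worked example (illustrative): a 24-bit coherent mask gets GFP_DMA and
 * allocates below 16MB; a 32-bit mask on x86_64 gets GFP_DMA32 (below
 * 4GB); a full 64-bit mask adds no zone restriction at all.
 */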
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	/* Strip caller-supplied zone flags; they are derived from the mask. */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

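/*
 * Illustrative sketch only: a long-lived, CPU/device-shared descriptor
 * ring; "dev" and "ring_bytes" are assumptions:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, ring_bytes, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(CPU accesses via "ring", the device via "ring_dma")
 *	dma_free_coherent(dev, ring_bytes, ring, ring_dma);
 */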
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */