#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	kmemcheck_mark_initialized(ptr, size);
	addr = ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(hwdev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
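/*
 * Example (driver-side sketch, not an API defined in this file): callers
 * typically pair dma_map_single() with dma_mapping_error() and a matching
 * dma_unmap_single(). The names "dev", "buf" and "len" are placeholders:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the device transfer using "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */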

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
	debug_dma_map_sg(hwdev, sg, nents, ents, dir);

	return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(hwdev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
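/*
 * Example (driver-side sketch): dma_map_sg() may coalesce entries, so its
 * return value is the number of DMA segments actually mapped, while
 * dma_unmap_sg() must still be called with the original nents. "dev",
 * "sglist" and "nents" are placeholder names:
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... program the device with the "mapped" segments ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */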

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
					    offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
					       offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);

	flush_write_buffers();
}
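/*
 * Example (driver-side sketch): when a streaming mapping is reused,
 * ownership is handed back and forth with the sync calls - sync_*_for_cpu
 * before the CPU reads a buffer the device wrote, sync_*_for_device before
 * the device touches it again. "dev", "handle" and "len" are placeholders:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */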

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
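/*
 * Example (driver-side sketch): coherent memory is obtained and released
 * with a matching alloc/free pair; the bus address is returned through the
 * dma_handle argument. "dev" and "size" are placeholder names:
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(dev, size, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... use cpu_addr from the CPU, hand bus_addr to the device ...
 *	dma_free_coherent(dev, size, cpu_addr, bus_addr);
 */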

#endif /* _ASM_X86_DMA_MAPPING_H */