#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * Address translation helpers between page frame numbers, kernel
 * virtual addresses and DMA (bus) addresses.  Platforms may override
 * these by defining __arch_pfn_to_dma and friends; otherwise the
 * generic bus/physical conversions from <asm/memory.h> are used.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * Private cache maintenance helpers used by the streaming DMA API
 * below.  They are no-ops when the architecture is DMA-coherent.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_dev_to_cpu(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
        void *cpu_addr, dma_addr_t handle)
{
}

/*
 * Barriers used around accesses to coherent DMA memory.  When coherent
 * memory is mapped as Normal memory (COHERENT_IS_NORMAL == 1) a dmb()
 * is always required to order the accesses; otherwise a dmb() is used
 * on coherent architectures and a compiler barrier() elsewhere, where
 * the Strongly Ordered mapping already enforces ordering.
 */
static inline void dma_coherent_pre_ops(void)
{
#if COHERENT_IS_NORMAL == 1
        dmb();
#else
        if (arch_is_coherent())
                dmb();
        else
                barrier();
#endif
}

static inline void dma_coherent_post_ops(void)
{
#if COHERENT_IS_NORMAL == 1
        dmb();
#else
        if (arch_is_coherent())
                dmb();
        else
                barrier();
#endif
}

extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);

extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);

extern void __init init_consistent_dma_size(unsigned long size);

/*
 * Low-level page map/unmap and sync helpers.  With CONFIG_DMABOUNCE
 * these go through the dmabounce layer, which can copy buffers into
 * DMA-safe memory; otherwise the inline fallbacks below simply perform
 * cache maintenance and a linear pfn/dma translation.
 */
#ifdef CONFIG_DMABOUNCE

extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long, int (*)(struct device *, dma_addr_t, size_t));

extern void dmabounce_unregister_dev(struct device *);

extern dma_addr_t __dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                handle & ~PAGE_MASK, size, dir);
}
#endif

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        unsigned long offset;
        struct page *page;
        dma_addr_t addr;

        BUG_ON(!virt_addr_valid(cpu_addr));
        BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
        BUG_ON(!valid_dma_direction(dir));

        page = virt_to_page(cpu_addr);
        offset = (unsigned long)cpu_addr & ~PAGE_MASK;
        addr = __dma_map_page(dev, page, offset, size, dir);
        debug_dma_map_page(dev, page, offset, size, dir, addr, true);

        return addr;
}

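/*
 * Usage sketch (illustrative only, not part of this header): mapping a
 * kmalloc()'d buffer so a device can DMA into it.  "dev" and "len" are
 * assumed to come from the calling driver.
 *
 *      void *buf = kmalloc(len, GFP_KERNEL);
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma))
 *              goto err_free;
 *      ... program the device with "dma" and wait for completion ...
 *      dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */
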
/**
 * dma_cache_pre_ops - clean or invalidate cache before a DMA transfer
 * is initiated and perform a barrier operation.
 * @virtual_addr: A kernel logical or kernel virtual address
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 */
static inline void dma_cache_pre_ops(void *virtual_addr,
                size_t size, enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                ___dma_single_cpu_to_dev(virtual_addr, size, dir);
}

/**
 * dma_cache_post_ops - clean or invalidate cache after a DMA transfer
 * has completed and perform a barrier operation.
 * @virtual_addr: A kernel logical or kernel virtual address
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data speculatively fetched into the cache during the
 * transfer is appropriately discarded.
 */
static inline void dma_cache_post_ops(void *virtual_addr,
                size_t size, enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        BUG_ON(!valid_dma_direction(dir));

        if (arch_has_speculative_dfetch() && !arch_is_coherent()
            && dir != DMA_TO_DEVICE)
                ___dma_single_cpu_to_dev(virtual_addr,
                        size, DMA_FROM_DEVICE);
}
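
/*
 * Illustrative sequence (not part of this header) for a driver that
 * manages the cache itself around a device-to-memory transfer on a
 * kernel-virtual buffer:
 *
 *      dma_cache_pre_ops(buf, len, DMA_FROM_DEVICE);
 *      ... start the transfer and wait for it to complete ...
 *      dma_cache_post_ops(buf, len, DMA_FROM_DEVICE);
 */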

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        addr = __dma_map_page(dev, page, offset, size, dir);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

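/*
 * Usage sketch (illustrative only): mapping part of a page for a
 * device read.  "page", "offset" and "len" are assumed to come from
 * the calling driver.
 *
 *      dma_addr_t dma = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ... start the transfer ...
 *      dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */
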
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, handle, size, dir, true);
        __dma_unmap_page(dev, handle, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, handle, size, dir, false);
        __dma_unmap_page(dev, handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

        if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                return;

        __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
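
/*
 * Usage sketch (illustrative only): letting the CPU inspect a buffer
 * that remains mapped for streaming DMA, then handing it back to the
 * device:
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... CPU reads or updates the buffer ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */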

/*
 * The scatter/gather list versions of the above interfaces.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
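
/*
 * Usage sketch (illustrative only): mapping a scatterlist and handing
 * each mapped segment to the hardware.  "program_hw_entry" is a
 * hypothetical driver helper.
 *
 *      struct scatterlist *sg;
 *      int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *      if (!count)
 *              return -ENOMEM;
 *      for_each_sg(sgl, sg, count, i)
 *              program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *      ... wait for the transfer to complete ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */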

#endif /* __KERNEL__ */
#endif /* ASMARM_DMA_MAPPING_H */