/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc, covering
 * the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle);

extern unsigned long get_dma_direct_offset(struct device *dev);

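/*
 * Other dma_mapping_ops implementations (IOMMU- or swiotlb-based ones,
 * for instance) can fall back on these helpers for devices that are
 * able to address all of memory. A hypothetical alloc_coherent hook
 * built that way might simply do:
 *
 *      return dma_direct_alloc_coherent(dev, size, dma_handle, flag);
 */
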
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
                            size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)    NULL
#define __dma_free_coherent(size, addr)                 ((void)0)
#define __dma_sync(addr, size, rw)                      ((void)0)
#define __dma_sync_page(pg, off, sz, rw)                ((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

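/*
 * As noted above, a driver on a non-coherent part may instead allocate
 * "normally" and manage coherency by hand. A hypothetical transmit
 * path would flush its buffer before handing it to the device:
 *
 *      __dma_sync(buf, len, DMA_TO_DEVICE);
 */
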
static inline unsigned long device_to_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        /* Assume devices without a mask can take 32 bit addresses */
        return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
        void *          (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        int             (*map_sg)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs);
        void            (*unmap_sg)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs);
        int             (*dma_supported)(struct device *dev, u64 mask);
        int             (*set_dma_mask)(struct device *dev, u64 dma_mask);
        dma_addr_t      (*map_page)(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction direction,
                                struct dma_attrs *attrs);
        void            (*unmap_page)(struct device *dev,
                                dma_addr_t dma_address, size_t size,
                                enum dma_data_direction direction,
                                struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size,
                                enum dma_data_direction direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size,
                                enum dma_data_direction direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                enum dma_data_direction direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                enum dma_data_direction direction);
#endif
};

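/*
 * A bus or platform fills in only the hooks it needs. As a purely
 * illustrative sketch (the my_bus_* names are hypothetical), an
 * implementation reusing the direct allocator might look like:
 *
 *      static struct dma_mapping_ops my_bus_dma_ops = {
 *              .alloc_coherent = dma_direct_alloc_coherent,
 *              .free_coherent  = dma_direct_free_coherent,
 *              .map_sg         = my_bus_map_sg,
 *              .unmap_sg       = my_bus_unmap_sg,
 *              .map_page       = my_bus_map_page,
 *              .unmap_page     = my_bus_unmap_page,
 *      };
 */
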
/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
        /* We don't handle the NULL dev case for ISA for now. We could
         * do it via an out-of-line call, but it is not needed yet: the
         * only ISA DMA device we support is the floppy, and the floppy
         * driver has a hack to hand us a device directly.
         */
        if (unlikely(dev == NULL))
                return NULL;

        return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
        dev->archdata.dma_ops = ops;
}

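/*
 * Platform or bus code installs a set of operations on each device
 * before drivers use it; for example (hypothetical caller, using the
 * generic direct ops declared above):
 *
 *      set_dma_ops(&pdev->dev, &dma_direct_ops);
 */
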
static inline int dma_supported(struct device *dev, u64 mask)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return 0;
        if (dma_ops->dma_supported == NULL)
                return 1;
        return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return -EIO;
        if (dma_ops->set_dma_mask != NULL)
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}

/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
                                              void *cpu_addr,
                                              size_t size,
                                              enum dma_data_direction direction,
                                              struct dma_attrs *attrs)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        return dma_ops->map_page(dev, virt_to_page(cpu_addr),
                                 (unsigned long)cpu_addr % PAGE_SIZE, size,
                                 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
                                          dma_addr_t dma_addr,
                                          size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                                            struct page *page,
                                            unsigned long offset, size_t size,
                                            enum dma_data_direction direction,
                                            struct dma_attrs *attrs)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
                                        dma_addr_t dma_address,
                                        size_t size,
                                        enum dma_data_direction direction,
                                        struct dma_attrs *attrs)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction direction,
                                   struct dma_attrs *attrs)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
                                      struct scatterlist *sg,
                                      int nhwentries,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                                        size_t size,
                                        enum dma_data_direction direction)
{
        return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                                    size_t size,
                                    enum dma_data_direction direction)
{
        dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction direction)
{
        return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction direction)
{
        return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nhwentries,
                                enum dma_data_direction direction)
{
        dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

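/*
 * Typical streaming use of the wrappers above, as a hypothetical
 * driver sketch (buf and len stand in for real driver state):
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ...start the device and wait for completion...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
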
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
                                                   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(dev, dma_handle,
                                                      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(dev, dma_handle,
                                                   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);

        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
                                                      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
}
#endif /* CONFIG_PPC_NEED_DMA_SYNC_OPS */

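/*
 * On parts that need the sync ops, these calls pass a mapped buffer
 * back and forth between CPU and device. A hypothetical receive path
 * might do:
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ...CPU examines the data the device wrote...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */
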
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
#else
        return 0;
#endif
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
        struct dev_archdata *sd = &dev->archdata;

        if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
                return 0;
#endif

        if (!dev->dma_mask)
                return 0;

        return addr + size <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return paddr + get_dma_direct_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        return daddr - get_dma_direct_offset(dev);
}

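/*
 * These apply the platform's constant direct-mapping offset. On a
 * hypothetical platform where get_dma_direct_offset() returns
 * 0x80000000, physical address 0x1000 becomes bus address 0x80001000,
 * and dma_to_phys() undoes the translation.
 */
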
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h) (0)
#else
#define dma_is_consistent(d, h) (1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
        /* no easy way to get cache size on all processors, so return
         * the maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
#else
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
#endif
}

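/*
 * A driver carving DMA buffers out of one allocation can use this to
 * keep them from sharing a cache line, e.g. (hypothetical):
 *
 *      size_t slot = ALIGN(buf_len, dma_get_cache_alignment());
 */
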
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */