/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * used by the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle);

extern unsigned long get_dma_direct_offset(struct device *dev);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

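/*
 * Illustrative sketch, not part of this header: the "allocate the space
 * normally and use the cache management functions" alternative mentioned
 * above would look roughly like this for a CPU-to-device transfer.  The
 * buffer and length names below are hypothetical.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	memcpy(buf, data, len);
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	(flush CPU cache to memory)
 *	... hand virt_to_phys(buf) to the device ...
 */
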
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without a mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now.  We could
	 * do it via an out-of-line call but it is not needed yet: the
	 * only ISA DMA device we support is the floppy, and the floppy
	 * driver has a hack to get a device for us directly.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

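/*
 * Illustrative sketch, not part of this header: platform or bus setup
 * code would typically install one of the generic sets of operations on
 * a newly discovered device.  The device pointer below is hypothetical.
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */
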
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

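/*
 * Illustrative sketch, not part of this header: a driver probe routine
 * would typically negotiate its addressing capability before doing any
 * mapping.  The device pointer below is hypothetical.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	(device cannot address our memory)
 */
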
/*
 * map_/unmap_single actually call through to map/unmap_page now that all
 * the dma_map_ops have been converted over.  We just have to get the page
 * and offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

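/*
 * Illustrative sketch, not part of this header: a coherent buffer is
 * typically allocated once at driver init and freed at teardown.  The
 * names below are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, use ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
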
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

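/*
 * Illustrative sketch, not part of this header: streaming mappings are
 * created around each transfer and must be checked for failure.  The
 * buffer and length names below are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... tell the device to DMA from handle, wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
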
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

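/*
 * Illustrative sketch, not part of this header: dma_map_sg() may coalesce
 * entries (e.g. behind an IOMMU), so walk only the count it returns but
 * unmap with the original nents.  The scatterlist names are hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *	for_each_sg(sgl, s, count, i)
 *		... feed sg_dma_address(s) / sg_dma_len(s) to the device ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
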
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
						   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle,
						      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
						   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
						      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif /* CONFIG_PPC_NEED_DMA_SYNC_OPS */

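/*
 * Illustrative sketch, not part of this header: if the CPU touches a
 * streaming buffer while it is still mapped, it must bracket the access
 * with sync calls.  The names below are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... inspect the data the device wrote ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
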
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_direct_offset(dev);
}

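/*
 * Illustrative note, not part of this header: the two helpers above are
 * inverses, differing by the per-device direct-DMA offset, so for any
 * paddr reachable by the device:
 *
 *	dma_to_phys(dev, phys_to_dma(dev, paddr)) == paddr
 */
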
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* No easy way to get the cache size on all processors, so return
	 * the maximum possible to be safe. */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */