blob: b9a8f18f35a2f24fd6991eda77b15519ea352493 [file] [log] [blame]
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

/*
 * Architecture-wide DMA operations table shared by all devices on sh;
 * defined in arch code. NOTE(review): presumably installed during early
 * boot (e.g. by no_iommu_init() when no IOMMU is present) — confirm
 * against the arch setup code.
 */
extern struct dma_map_ops *dma_ops;
extern void no_iommu_init(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
Paul Mundt73c926b2009-10-20 12:55:56 +09007static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8{
9 return dma_ops;
10}
Linus Torvalds1da177e2005-04-16 15:20:36 -070011
Paul Mundt73c926b2009-10-20 12:55:56 +090012static inline int dma_supported(struct device *dev, u64 mask)
13{
14 struct dma_map_ops *ops = get_dma_ops(dev);
15
16 if (ops->dma_supported)
17 return ops->dma_supported(dev, mask);
18
19 return 1;
20}
Linus Torvalds1da177e2005-04-16 15:20:36 -070021
22static inline int dma_set_mask(struct device *dev, u64 mask)
23{
Paul Mundt73c926b2009-10-20 12:55:56 +090024 struct dma_map_ops *ops = get_dma_ops(dev);
25
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 if (!dev->dma_mask || !dma_supported(dev, mask))
27 return -EIO;
Paul Mundt73c926b2009-10-20 12:55:56 +090028 if (ops->set_dma_mask)
29 return ops->set_dma_mask(dev, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
31 *dev->dma_mask = mask;
32
33 return 0;
34}
35
/* Coherent DMA buffer allocation/free; implemented in arch code. */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

/*
 * NOTE(review): presumably performs the CPU cache maintenance needed
 * before/after a noncoherent DMA transfer on [vaddr, vaddr + size) —
 * confirm against the arch implementation.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

/*
 * Noncoherent allocations simply alias the coherent ones on this
 * platform, and all memory is reported as consistent (see
 * dma_is_consistent below, hard-wired to 1).
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
48
Linus Torvalds1da177e2005-04-16 15:20:36 -070049static inline int dma_get_cache_alignment(void)
50{
51 /*
52 * Each processor family will define its own L1_CACHE_SHIFT,
53 * L1_CACHE_BYTES wraps to this, so this is always safe.
54 */
55 return L1_CACHE_BYTES;
56}
57
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070058static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070059{
Paul Mundt73c926b2009-10-20 12:55:56 +090060 struct dma_map_ops *ops = get_dma_ops(dev);
61
62 if (ops->mapping_error)
63 return ops->mapping_error(dev, dma_addr);
64
Linus Torvalds1da177e2005-04-16 15:20:36 -070065 return dma_addr == 0;
66}
Magnus Dammf93e97e2008-01-24 18:35:10 +090067
/*
 * Pull in the generic coherent-pool helpers and the common dma_map_*
 * wrappers; the latter are built on the get_dma_ops() accessor defined
 * above, which is why these includes come last.
 */
#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>

#endif /* __ASM_SH_DMA_MAPPING_H */