#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

extern struct dma_map_ops *dma_ops;
extern void no_iommu_init(void);

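/*
 * sh has no per-device IOMMU, so a single, globally installed set of
 * DMA operations serves every device (presumably selected at boot,
 * e.g. by no_iommu_init() on IOMMU-less configurations).
 */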
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return dma_ops;
}

#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>

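/*
 * Report whether @dev can reach all memory described by @mask.
 * Defer to the backend's ->dma_supported() hook when it provides one;
 * otherwise assume any mask is supported.
 */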
static inline int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        return 1;
}

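/*
 * Update the device's DMA addressing mask, failing with -EIO when the
 * mask is unsupported or the device has no dma_mask to update.
 */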
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        if (ops->set_dma_mask)
                return ops->set_dma_mask(dev, mask);

        *dev->dma_mask = mask;

        return 0;
}

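/*
 * Out-of-line cache maintenance helper for memory obtained through
 * dma_alloc_noncoherent(): makes the buffer consistent between the
 * CPU and the device for the given transfer direction.
 */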
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction dir);

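/*
 * sh has no separate noncoherent allocator; noncoherent requests fall
 * back to the coherent routines, and all allocations are treated as
 * consistent.
 */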
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family defines its own L1_CACHE_SHIFT, and
         * L1_CACHE_BYTES is derived from it, so this is always safe.
         */
        return L1_CACHE_BYTES;
}

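/*
 * Return nonzero if @dma_addr represents a failed mapping.  Backends
 * may supply their own ->mapping_error() hook; without one, a DMA
 * address of zero is taken to mean failure.
 */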
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return dma_addr == 0;
}

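/*
 * Allocate a coherent buffer for @dev: first try any per-device
 * coherent memory pool, then fall back to the backend's
 * ->alloc_coherent() hook.  Returns the CPU virtual address and
 * stores the bus address in *dma_handle.
 */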
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;
        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}

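/*
 * Release a buffer obtained from dma_alloc_coherent().  Must not be
 * called with interrupts disabled (hence the WARN_ON() below).
 */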
static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, dma_handle);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, dma_handle);
}

/* Generic backends, implemented in arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
                                      void *vaddr, dma_addr_t dma_handle);

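/*
 * Usage sketch (hypothetical driver code, not part of this header;
 * RING_BYTES is an assumed constant): allocate a coherent descriptor
 * ring at probe time and release it on teardown.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *
 *      ...program the device with ring_dma, access ring from the CPU...
 *
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */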
#endif /* __ASM_SH_DMA_MAPPING_H */