#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <asm/scatterlist.h>
#include <asm/dma-coherence.h>
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>

#ifndef CONFIG_SGI_IP27	/* Kludge to fix 2.6.39 build for IP27 */
#include <dma-coherence.h>
#endif

extern struct dma_map_ops *mips_dma_map_ops;

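/*
 * Per-device DMA ops (installed in dev->archdata by platform or bus code)
 * take precedence over the platform-wide default, mips_dma_map_ops.
 */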
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return mips_dma_map_ops;
}

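/*
 * True when the range addr .. addr + size - 1 lies entirely below the
 * device's DMA mask; false when the device has no DMA mask at all.
 */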
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

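/*
 * Pulls in the generic dma_map_single()/dma_map_page()/dma_map_sg() and
 * dma_sync_*() wrappers, all of which dispatch through get_dma_ops().
 */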
#include <asm-generic/dma-mapping-common.h>

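/* Ask the DMA ops whether the device can address memory under @mask. */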
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int dma_mapping_error(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, mask);
	return ops->mapping_error(dev, mask);
}
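
/*
 * Illustrative sketch, not part of the original header: callers are
 * expected to check every mapping they create, e.g.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */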

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

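/*
 * Illustrative sketch, not part of the original header: a driver probe
 * routine would typically do something like
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * before setting up any DMA mappings.
 */
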
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction);

#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp,
				    struct dma_attrs *attrs)
{
	void *ret;
	struct dma_map_ops *ops = get_dma_ops(dev);

	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);

	return ret;
}

#define dma_free_coherent(d,s,c,h)	dma_free_attrs(d,s,c,h,NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	ops->free(dev, size, vaddr, dma_handle, attrs);

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
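
/*
 * Illustrative sketch, not part of the original header: a coherent
 * buffer is allocated and released as a pair, e.g.
 *
 *	dma_addr_t phys;
 *	void *cpu = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, cpu, phys);
 */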
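/*
 * Non-coherent allocations: the caller is responsible for maintaining
 * coherence, typically with dma_cache_sync().
 */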
void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);

void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle);

#endif /* _ASM_DMA_MAPPING_H */