#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

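/*
 * All of the allocation, mapping, and sync helpers below simply dispatch
 * through the dma_map_ops vector returned by platform_dma_get_ops(), so
 * the active machine vector (swiotlb, an IOMMU-backed implementation,
 * etc.) supplies the real behaviour.
 */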
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

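/*
 * struct dma_map_ops has no ->map_single hook: a kernel virtual address
 * is split into its page (virt_to_page()) and the offset within that
 * page so that the request can be routed through ->map_page instead.
 */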
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
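
/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * header); 'dev', 'buf', and 'len' are placeholders:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	...program the device with 'bus', wait for completion...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */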

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
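
/*
 * Note: as in the generic DMA API, dma_map_sg() returns the number of
 * DMA segments actually mapped, which may be less than nents if the
 * underlying implementation coalesces entries; 0 indicates failure.
 */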

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

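/*
 * The range variants below ignore 'offset': since IA-64 is
 * cache-coherent (see dma_cache_sync() below), these syncs only need
 * to enforce ordering, so syncing the whole mapping is equivalent.
 */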
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
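
/*
 * Typical (hypothetical) caller, restricting a device to 32-bit DMA
 * before any mappings are created:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */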

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */