#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

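/*
 * All of the routines below simply dispatch through the dma_map_ops
 * vector selected for the device by platform_dma_get_ops() -- typically
 * swiotlb or a machine-vector-specific IOMMU implementation.
 */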
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

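/*
 * IA-64 is fully cache-coherent, so the "noncoherent" variants are
 * plain aliases for the coherent ones (see also dma_is_consistent()
 * at the bottom of this file).
 */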
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

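/*
 * dma_map_ops only has a page-based mapping hook, so a virtual address
 * is split into its page and the offset within that page before being
 * handed to ->map_page().
 */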
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

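/*
 * The standard entry points just pass a NULL attrs argument.  Typical
 * driver usage (illustrative only; "dev" and "buf" are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */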
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

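/*
 * Scatterlist mapping may coalesce entries, so ->map_sg() returns the
 * number of DMA segments actually produced (0 on failure).  A sketch of
 * the usual pattern ("dev", "sgl", "nents" and program_hw() are
 * hypothetical; the iterators come from <linux/scatterlist.h>):
 *
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	struct scatterlist *sg;
 *
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */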
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

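/*
 * The sync routines transfer ownership of a streaming mapping between
 * the device and the CPU without unmapping it: sync_*_for_cpu before
 * the CPU touches the buffer, sync_*_for_device before handing it back.
 */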
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}

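/*
 * Every address returned by dma_map_single()/dma_map_page() must be
 * checked with dma_mapping_error() before use, since an IOMMU or
 * bounce-buffer backend can fail to map.
 */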
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

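/*
 * Page-level mapping.  Unmapping can reuse dma_unmap_single() because
 * both paths end up in ->unmap_page().
 */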
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

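/*
 * Note that these macros drop the offset argument and simply sync from
 * the start of the mapping.
 */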
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

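/*
 * A driver would normally call this once at probe time, e.g.
 * (illustrative; "pdev" is hypothetical, DMA_BIT_MASK comes from
 * <linux/dma-mapping.h>):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */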
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */