#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

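/*
 * Pick the dma_map_ops for a device: the per-device ops from
 * dev->archdata if present (64-bit only), otherwise the global dma_ops.
 */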
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the old behaviour: if the ops implementation has no
 * mapping_error hook, compare against the global bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

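/*
 * x86 memory is cache-coherent, so the "noncoherent" variants map
 * straight to the coherent ones and all memory is consistent.
 */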
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

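/*
 * Map a single contiguous kernel buffer for DMA.  Built on the
 * ops->map_page hook: the page and the offset within it are derived
 * from the buffer's virtual address.
 */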
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	kmemcheck_mark_initialized(ptr, size);
	addr = ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(hwdev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

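/*
 * Map/unmap a scatter-gather list.  dma_map_sg() returns the number of
 * DMA segments actually mapped, which may be fewer than nents if the
 * IOMMU merged adjacent entries; 0 indicates failure.
 */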
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	int ents;
	struct scatterlist *s;
	int i;

	BUG_ON(!valid_dma_direction(dir));
	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
	debug_dma_map_sg(hwdev, sg, nents, ents, dir);

	return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(hwdev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}

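/*
 * Ownership transfers for streaming mappings: the *_for_cpu variants
 * hand a mapped buffer back to the CPU, the *_for_device variants hand
 * it back to the device.  The *_range_* variants operate on a partial
 * buffer at the given offset.
 */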
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
					    offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
					       offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

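/*
 * Map/unmap a single page (or a sub-range of it, given offset and
 * size) for DMA.  These are the page-based primitives that
 * dma_map_single() is built on.
 */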
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	kmemcheck_mark_initialized(page_address(page) + offset, size);
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

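/*
 * x86 DMA is cache-coherent, so syncing memory obtained from
 * dma_alloc_noncoherent() only needs a write-buffer flush; the vaddr,
 * size and dir arguments are unused.
 */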
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86 CPUs,
	 * so return the CLFLUSH line size as a safe upper bound.
	 */
	return boot_cpu_data.x86_clflush_size;
}

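/*
 * Helpers for dma_alloc_coherent(): derive the effective coherent DMA
 * mask for a device and translate it into the matching GFP zone flags.
 */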
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

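/*
 * Allocate/free a consistent DMA buffer.  Allocation first tries the
 * device's dedicated coherent pool, then falls back to
 * ops->alloc_coherent with GFP flags constrained by the device's
 * coherent DMA mask.
 */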
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */