/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

/* The offset of 'val' within its (power-of-2) 'align'-sized chunk. */
#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
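
/*
 * Sanity check on the arithmetic, assuming the usual IO_TLB_SHIFT of 11
 * (2KB slabs) from <linux/swiotlb.h>: IO_TLB_MIN_SLABS is then
 * (1 << 20) >> 11 = 512 slabs, i.e. exactly the contiguous 1MB mentioned
 * above.
 */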

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
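
/*
 * For reference, the parameter parsed above has the form
 * "swiotlb=<nslabs>[,force]".  A (made-up) example: "swiotlb=65536,force"
 * reserves 65536 slabs and forces bounce-buffering even for devices that
 * could address the memory directly; the slab count is rounded up to a
 * multiple of IO_TLB_SEGSIZE by the ALIGN() above.
 */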

void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
}

int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
					       dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}
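
/*
 * The __weak hooks above assume an identity phys<->bus mapping and plain
 * bootmem/page allocations; platforms where bus addresses differ from
 * physical addresses (Xen-style setups, presumably the use case these
 * hooks were introduced for) are expected to override them.
 */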

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}
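
/*
 * Sizing example for the 64MB default (again assuming IO_TLB_SHIFT == 11):
 * io_tlb_nslabs = (64 << 20) >> 11 = 32768 slabs, which is already a
 * multiple of IO_TLB_SEGSIZE, so no rounding occurs and bytes stays 64MB.
 */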

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}
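
/*
 * Note on the allocation loop above: get_order() rounds the request up to
 * a power-of-2 number of pages, and each failed swiotlb_alloc() halves the
 * attempt (order--) until either an allocation succeeds or it would drop
 * below IO_TLB_MIN_SLABS; the "only able to allocate" warning covers the
 * case where we settled for less than was requested.
 */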

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}

static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}
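
/*
 * The local_irq_save() around the kmap_atomic() pair above is there
 * because the KM_BOUNCE_READ slot is per-CPU and (presumably) may also be
 * taken from interrupt context; an interrupt arriving between
 * kmap_atomic() and kunmap_atomic() could otherwise reuse and clobber the
 * mapping.
 */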

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}
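
/*
 * Worked example of the slot math above, assuming 4KB pages and 2KB slabs
 * (PAGE_SHIFT == 12, IO_TLB_SHIFT == 11): a 6KB request gives
 * nslots = ALIGN(6144, 2048) >> 11 = 3, and since 6KB > PAGE_SIZE the
 * search stride becomes 1 << (12 - 11) = 2, so candidate buffers start
 * only on page-aligned slots.
 */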

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with the following slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
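
/*
 * Free-list merge example: freeing slots 4-5 when io_tlb_list[6] is
 * already 3 (three free slots follow) makes step 1 write
 * io_tlb_list[5] = 4 and io_tlb_list[4] = 5; step 2 then continues
 * downward through any free slots below index 4.  Each entry thus holds
 * the length of the contiguous free run starting there, capped at the
 * IO_TLB_SEGSIZE segment boundary.
 */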

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
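
/*
 * Hypothetical driver-side sketch of how this allocator is reached; real
 * drivers call the generic DMA API rather than swiotlb directly:
 *
 *	dma_addr_t bus;
 *	void *ring = dma_alloc_coherent(dev, 4096, &bus, GFP_KERNEL);
 *	if (ring) {
 *		... program the device with "bus", access "ring" from
 *		    the CPU, then release with:
 *		dma_free_coherent(dev, 4096, ring, bus);
 *	}
 */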

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(dev, dev_addr, size) &&
	    !range_needs_mapping(phys, size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
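
/*
 * Streaming-mode sketch (hypothetical caller; this entry point is normally
 * reached through dma_map_page()/dma_unmap_page() via the architecture's
 * dma_ops rather than called directly):
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (!dma_mapping_error(dev, bus)) {
 *		... hand "bus" to the device and wait for completion ...
 *		dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
 *	}
 */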

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(dma_addr)) {
		do_unmap_single(hwdev, dma_addr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(dma_addr, size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(dma_addr)) {
		sync_single(hwdev, dma_addr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
| John W. Linville | 878a97c | 2005-09-29 14:44:23 -0700 | [diff] [blame] | 792 |  | 
 | 793 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 794 |  * Map a set of buffers described by scatterlist in streaming mode for DMA. | 
| Becky Bruce | ceb5ac3 | 2009-04-08 09:09:15 -0500 | [diff] [blame] | 795 |  * This is the scatter-gather version of the above swiotlb_map_page | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 796 |  * interface.  Here the scatter gather list elements are each tagged with the | 
 | 797 |  * appropriate dma address and length.  They are obtained via | 
 | 798 |  * sg_dma_{address,length}(SG). | 
 | 799 |  * | 
 | 800 |  * NOTE: An implementation may be able to use a smaller number of | 
 | 801 |  *       DMA address/length pairs than there are SG table elements. | 
 | 802 |  *       (for example via virtual mapping capabilities) | 
 | 803 |  *       The routine returns the number of addr/length pairs actually | 
 | 804 |  *       used, at most nents. | 
 | 805 |  * | 
| Becky Bruce | ceb5ac3 | 2009-04-08 09:09:15 -0500 | [diff] [blame] | 806 |  * Device ownership issues as mentioned above for swiotlb_map_page are the | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 807 |  * same here. | 
 | 808 |  */ | 
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		/*
		 * Bounce-buffer this element if the buffer needs remapping
		 * or its bus address is not reachable by the device.
		 */
		if (range_needs_mapping(paddr, sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/*
				 * Don't panic here; we expect map_sg users
				 * to do proper error handling.
				 */
				swiotlb_full(hwdev, sg->length, dir, 0);
				/* Undo the entries mapped so far. */
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
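
/*
 * Example (hypothetical, not part of swiotlb): roughly how a driver is
 * expected to consume the interface above via the generic DMA API, which
 * dispatches to swiotlb_map_sg() on swiotlb platforms.  A return of 0
 * must be treated as failure, and only the returned number of addr/length
 * pairs may be programmed into the hardware.  The my_* names are
 * assumptions made up for this sketch.
 */
#if 0
static void my_program_hw_entry(struct device *dev, dma_addr_t addr,
				unsigned int len)
{
	/* Hypothetical: write one addr/len pair into a descriptor ring. */
}

static int my_start_dma(struct device *dev, struct scatterlist *sgl,
			int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;		/* map_sg users must handle failure */

	/* 'mapped' may be smaller than nents; use only that many pairs. */
	for_each_sg(sgl, sg, mapped, i)
		my_program_hw_entry(dev, sg_dma_address(sg),
				    sg_dma_length(sg));

	/* ... after the transfer completes, unmap with the original nents. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
#endif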

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA
 * translations after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
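
/*
 * Example (hypothetical): if the CPU must look at bounce-buffered data
 * between transfers on a long-lived mapping, it brackets the access with
 * the sync calls above rather than unmapping and remapping.  Sketch only;
 * my_poll_rx() and my_process_rx() are assumptions, not swiotlb code.
 */
#if 0
static void my_poll_rx(struct device *dev, struct scatterlist *sgl,
		       int nelems, void *cpu_buf)
{
	/* Copy the bounce buffers back so the CPU sees current data. */
	dma_sync_sg_for_cpu(dev, sgl, nelems, DMA_FROM_DEVICE);

	my_process_rx(cpu_buf);

	/* Hand the buffers back to the device before the next transfer. */
	dma_sync_sg_for_device(dev, sgl, nelems, DMA_FROM_DEVICE);
}
#endif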

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
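
/*
 * Example (hypothetical): because a failed swiotlb mapping hands back the
 * bus address of the overflow buffer rather than an obviously bad value,
 * callers must check every handle with dma_mapping_error() before using
 * it.  Sketch only; my_map_one() is an assumption.
 */
#if 0
static int my_map_one(struct device *dev, struct page *page, size_t size)
{
	dma_addr_t handle = dma_map_page(dev, page, 0, size,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* never hand a failed handle to hardware */
	/* ... program the device with 'handle' ... */
	return 0;
}
#endif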

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
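
/*
 * Example (hypothetical): a driver typically negotiates its DMA mask at
 * probe time; dma_set_mask() ends up in swiotlb_dma_supported() on
 * swiotlb platforms.  Sketch only; my_set_dma_mask() is an assumption.
 */
#if 0
static int my_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask(dev, DMA_BIT_MASK(64)))
		return 0;	/* device can address all of memory */
	if (!dma_set_mask(dev, DMA_BIT_MASK(32)))
		return 0;	/* constrain bus addresses to 32 bits */
	return -EIO;		/* no usable DMA addressing */
}
#endif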