| Glauber Costa | 459121c9 | 2008-04-08 13:20:43 -0300 | [diff] [blame] | 1 | #include <linux/dma-mapping.h> | 
| Glauber Costa | cb5867a | 2008-04-08 13:20:51 -0300 | [diff] [blame] | 2 | #include <linux/dmar.h> | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 3 | #include <linux/bootmem.h> | 
| Glauber Costa | bca5c09 | 2008-04-08 13:20:53 -0300 | [diff] [blame] | 4 | #include <linux/pci.h> | 
| Glauber Costa | cb5867a | 2008-04-08 13:20:51 -0300 | [diff] [blame] | 5 |  | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 6 | #include <asm/proto.h> | 
 | 7 | #include <asm/dma.h> | 
| FUJITA Tomonori | 46a7fa2 | 2008-07-11 10:23:42 +0900 | [diff] [blame] | 8 | #include <asm/iommu.h> | 
| Joerg Roedel | 1d9b16d | 2008-11-27 18:39:15 +0100 | [diff] [blame] | 9 | #include <asm/gart.h> | 
| Glauber Costa | cb5867a | 2008-04-08 13:20:51 -0300 | [diff] [blame] | 10 | #include <asm/calgary.h> | 
| Joerg Roedel | a69ca34 | 2008-06-26 21:28:08 +0200 | [diff] [blame] | 11 | #include <asm/amd_iommu.h> | 
| Glauber Costa | 459121c9 | 2008-04-08 13:20:43 -0300 | [diff] [blame] | 12 |  | 
/*
 * DAC (double-address-cycle, i.e. 64-bit PCI addressing) policy:
 * dma_supported() refuses masks above 32 bits when forbid_dac > 0.
 * Adjusted by the "iommu=" options parsed below and by the VIA bridge
 * quirk at the bottom of this file.
 */
static int forbid_dac __read_mostly;

/* Active DMA mapping implementation; selected by whichever IOMMU
 * init in pci_iommu_init() claims the machine. */
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Set by "iommu=forcesac": make dma_supported() steer drivers away
 * from DAC even for wide masks (see comment in dma_supported()). */
static int iommu_sac_force __read_mostly;

/* Be conservative when IOMMU debugging is compiled in: panic on
 * IOMMU overflow and force all DMA through the IOMMU. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Allow scatter-gather merging; toggled by iommu=merge/nomerge below. */
int iommu_merge __read_mostly = 0;

/* Set by "iommu=off": do not use any hardware IOMMU. */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Sentinel address for failed mappings — presumably what
 * dma_mapping_error() compares against; defined policy lives in the
 * per-IOMMU implementations (NOTE(review): confirm against callers). */
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
| Glauber Costa | fae9a0d | 2008-04-08 13:20:56 -0300 | [diff] [blame] | 36 |  | 
/*
 * Dummy device used for NULL device arguments (normally ISA).  A smaller
 * DMA mask would probably be better, but this is bug-to-bug compatible
 * with older i386: a 32-bit coherent mask, with dma_mask pointing at the
 * very same storage so both masks always agree.
 */
struct device x86_dma_fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
| Glauber Costa | 098cb7f | 2008-04-09 13:18:10 -0300 | [diff] [blame] | 46 |  | 
| Glauber Costa | 459121c9 | 2008-04-08 13:20:43 -0300 | [diff] [blame] | 47 | int dma_set_mask(struct device *dev, u64 mask) | 
 | 48 | { | 
 | 49 | 	if (!dev->dma_mask || !dma_supported(dev, mask)) | 
 | 50 | 		return -EIO; | 
 | 51 |  | 
 | 52 | 	*dev->dma_mask = mask; | 
 | 53 |  | 
 | 54 | 	return 0; | 
 | 55 | } | 
 | 56 | EXPORT_SYMBOL(dma_set_mask); | 
 | 57 |  | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 58 | #ifdef CONFIG_X86_64 | 
/* Early reservation below 4GB, handed back in pci_iommu_alloc() so the
 * IOMMU setup code can allocate DMA32-capable memory. */
static __initdata void *dma32_bootmem_ptr;
/* Size of that reservation; 128MB default, overridden by "dma32_size=". */
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
 | 61 |  | 
 | 62 | static int __init parse_dma32_size_opt(char *p) | 
 | 63 | { | 
 | 64 | 	if (!p) | 
 | 65 | 		return -EINVAL; | 
 | 66 | 	dma32_bootmem_size = memparse(p, &p); | 
 | 67 | 	return 0; | 
 | 68 | } | 
 | 69 | early_param("dma32_size", parse_dma32_size_opt); | 
 | 70 |  | 
/*
 * Reserve dma32_bootmem_size bytes of bootmem below 4GB early in boot.
 * The reservation is released again by dma32_free_bootmem() (from
 * pci_iommu_alloc()) so the IOMMU code can grab sub-4GB ranges later.
 */
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	/* Nothing to do when all of RAM already sits below 4GB. */
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for reason about
	 * using 512M as goal
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	/* Record the actual (rounded) size, or 0 if the attempt failed,
	 * so dma32_free_bootmem() frees exactly what was reserved. */
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
 | 90 | static void __init dma32_free_bootmem(void) | 
 | 91 | { | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 92 |  | 
| Yinghai Lu | c987d12 | 2008-06-24 22:14:09 -0700 | [diff] [blame] | 93 | 	if (max_pfn <= MAX_DMA32_PFN) | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 94 | 		return; | 
 | 95 |  | 
 | 96 | 	if (!dma32_bootmem_ptr) | 
 | 97 | 		return; | 
 | 98 |  | 
| Yinghai Lu | 330fce2 | 2008-04-19 01:31:45 -0700 | [diff] [blame] | 99 | 	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size); | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 100 |  | 
 | 101 | 	dma32_bootmem_ptr = NULL; | 
 | 102 | 	dma32_bootmem_size = 0; | 
 | 103 | } | 
 | 104 |  | 
/*
 * Early IOMMU detection/allocation.  Releases the dma32 bootmem
 * reservation first so the detectors below can allocate below 4GB.
 */
void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	/* Runs last — presumably the software fallback when no hardware
	 * IOMMU was detected above (see the ordering note). */
	pci_swiotlb_init();
}
| FUJITA Tomonori | 8978b74 | 2008-07-29 13:38:53 +0900 | [diff] [blame] | 123 |  | 
| Joerg Roedel | bdab0ba | 2008-10-15 22:02:07 -0700 | [diff] [blame] | 124 | unsigned long iommu_nr_pages(unsigned long addr, unsigned long len) | 
| FUJITA Tomonori | 8978b74 | 2008-07-29 13:38:53 +0900 | [diff] [blame] | 125 | { | 
 | 126 | 	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE); | 
 | 127 |  | 
 | 128 | 	return size >> PAGE_SHIFT; | 
 | 129 | } | 
| Joerg Roedel | bdab0ba | 2008-10-15 22:02:07 -0700 | [diff] [blame] | 130 | EXPORT_SYMBOL(iommu_nr_pages); | 
| Glauber Costa | 116890d | 2008-04-08 13:20:54 -0300 | [diff] [blame] | 131 | #endif | 
 | 132 |  | 
/*
 * Generic allocator for coherent DMA memory: returns a zeroed,
 * page-order allocation whose physical address satisfies the device's
 * coherent DMA mask, or NULL on failure.  *dma_addr receives the bus
 * address (here identical to the physical address).
 */
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	/* Effective mask for this allocation, derived from dev and flags. */
	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;	/* hand back zeroed memory */
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		/*
		 * The pages landed above the mask.  For masks narrower
		 * than 32 bits, retry once from the GFP_DMA zone —
		 * unless we were already trying GFP_DMA, in which case
		 * give up.
		 */
		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
 | 163 |  | 
| Glauber Costa | fae9a0d | 2008-04-08 13:20:56 -0300 | [diff] [blame] | 164 | /* | 
 | 165 |  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter | 
 | 166 |  * documentation. | 
 | 167 |  */ | 
 | 168 | static __init int iommu_setup(char *p) | 
 | 169 | { | 
 | 170 | 	iommu_merge = 1; | 
 | 171 |  | 
 | 172 | 	if (!p) | 
 | 173 | 		return -EINVAL; | 
 | 174 |  | 
 | 175 | 	while (*p) { | 
 | 176 | 		if (!strncmp(p, "off", 3)) | 
 | 177 | 			no_iommu = 1; | 
 | 178 | 		/* gart_parse_options has more force support */ | 
 | 179 | 		if (!strncmp(p, "force", 5)) | 
 | 180 | 			force_iommu = 1; | 
 | 181 | 		if (!strncmp(p, "noforce", 7)) { | 
 | 182 | 			iommu_merge = 0; | 
 | 183 | 			force_iommu = 0; | 
 | 184 | 		} | 
 | 185 |  | 
 | 186 | 		if (!strncmp(p, "biomerge", 8)) { | 
| Glauber Costa | fae9a0d | 2008-04-08 13:20:56 -0300 | [diff] [blame] | 187 | 			iommu_merge = 1; | 
 | 188 | 			force_iommu = 1; | 
 | 189 | 		} | 
 | 190 | 		if (!strncmp(p, "panic", 5)) | 
 | 191 | 			panic_on_overflow = 1; | 
 | 192 | 		if (!strncmp(p, "nopanic", 7)) | 
 | 193 | 			panic_on_overflow = 0; | 
 | 194 | 		if (!strncmp(p, "merge", 5)) { | 
 | 195 | 			iommu_merge = 1; | 
 | 196 | 			force_iommu = 1; | 
 | 197 | 		} | 
 | 198 | 		if (!strncmp(p, "nomerge", 7)) | 
 | 199 | 			iommu_merge = 0; | 
 | 200 | 		if (!strncmp(p, "forcesac", 8)) | 
 | 201 | 			iommu_sac_force = 1; | 
 | 202 | 		if (!strncmp(p, "allowdac", 8)) | 
 | 203 | 			forbid_dac = 0; | 
 | 204 | 		if (!strncmp(p, "nodac", 5)) | 
 | 205 | 			forbid_dac = -1; | 
 | 206 | 		if (!strncmp(p, "usedac", 6)) { | 
 | 207 | 			forbid_dac = -1; | 
 | 208 | 			return 1; | 
 | 209 | 		} | 
 | 210 | #ifdef CONFIG_SWIOTLB | 
 | 211 | 		if (!strncmp(p, "soft", 4)) | 
 | 212 | 			swiotlb = 1; | 
 | 213 | #endif | 
 | 214 |  | 
| Glauber Costa | fae9a0d | 2008-04-08 13:20:56 -0300 | [diff] [blame] | 215 | 		gart_parse_options(p); | 
| Glauber Costa | fae9a0d | 2008-04-08 13:20:56 -0300 | [diff] [blame] | 216 |  | 
 | 217 | #ifdef CONFIG_CALGARY_IOMMU | 
 | 218 | 		if (!strncmp(p, "calgary", 7)) | 
 | 219 | 			use_calgary = 1; | 
 | 220 | #endif /* CONFIG_CALGARY_IOMMU */ | 
 | 221 |  | 
 | 222 | 		p += strcspn(p, ","); | 
 | 223 | 		if (*p == ',') | 
 | 224 | 			++p; | 
 | 225 | 	} | 
 | 226 | 	return 0; | 
 | 227 | } | 
 | 228 | early_param("iommu", iommu_setup); | 
 | 229 |  | 
| Glauber Costa | 8e0c379 | 2008-04-08 13:20:55 -0300 | [diff] [blame] | 230 | int dma_supported(struct device *dev, u64 mask) | 
 | 231 | { | 
| FUJITA Tomonori | 8d8bb39 | 2008-07-25 19:44:49 -0700 | [diff] [blame] | 232 | 	struct dma_mapping_ops *ops = get_dma_ops(dev); | 
 | 233 |  | 
| Glauber Costa | 8e0c379 | 2008-04-08 13:20:55 -0300 | [diff] [blame] | 234 | #ifdef CONFIG_PCI | 
 | 235 | 	if (mask > 0xffffffff && forbid_dac > 0) { | 
| Greg Kroah-Hartman | fc3a882 | 2008-05-02 06:02:41 +0200 | [diff] [blame] | 236 | 		dev_info(dev, "PCI: Disallowing DAC for device\n"); | 
| Glauber Costa | 8e0c379 | 2008-04-08 13:20:55 -0300 | [diff] [blame] | 237 | 		return 0; | 
 | 238 | 	} | 
 | 239 | #endif | 
 | 240 |  | 
| FUJITA Tomonori | 8d8bb39 | 2008-07-25 19:44:49 -0700 | [diff] [blame] | 241 | 	if (ops->dma_supported) | 
 | 242 | 		return ops->dma_supported(dev, mask); | 
| Glauber Costa | 8e0c379 | 2008-04-08 13:20:55 -0300 | [diff] [blame] | 243 |  | 
 | 244 | 	/* Copied from i386. Doesn't make much sense, because it will | 
 | 245 | 	   only work for pci_alloc_coherent. | 
 | 246 | 	   The caller just has to use GFP_DMA in this case. */ | 
 | 247 | 	if (mask < DMA_24BIT_MASK) | 
 | 248 | 		return 0; | 
 | 249 |  | 
 | 250 | 	/* Tell the device to use SAC when IOMMU force is on.  This | 
 | 251 | 	   allows the driver to use cheaper accesses in some cases. | 
 | 252 |  | 
 | 253 | 	   Problem with this is that if we overflow the IOMMU area and | 
 | 254 | 	   return DAC as fallback address the device may not handle it | 
 | 255 | 	   correctly. | 
 | 256 |  | 
 | 257 | 	   As a special case some controllers have a 39bit address | 
 | 258 | 	   mode that is as efficient as 32bit (aic79xx). Don't force | 
 | 259 | 	   SAC for these.  Assume all masks <= 40 bits are of this | 
 | 260 | 	   type. Normally this doesn't make any difference, but gives | 
 | 261 | 	   more gentle handling of IOMMU overflow. */ | 
 | 262 | 	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { | 
| Greg Kroah-Hartman | fc3a882 | 2008-05-02 06:02:41 +0200 | [diff] [blame] | 263 | 		dev_info(dev, "Force SAC with mask %Lx\n", mask); | 
| Glauber Costa | 8e0c379 | 2008-04-08 13:20:55 -0300 | [diff] [blame] | 264 | 		return 0; | 
 | 265 | 	} | 
 | 266 |  | 
 | 267 | 	return 1; | 
 | 268 | } | 
 | 269 | EXPORT_SYMBOL(dma_supported); | 
 | 270 |  | 
/*
 * Late IOMMU initialization; runs after the PCI subsystem is up (see
 * the fs_initcall() below).  The call order mirrors the detection order
 * in pci_iommu_alloc(); presumably each init claims dma_ops only when
 * its hardware is present, with no_iommu_init() as the final fallback —
 * NOTE(review): confirm against the individual *_init() implementations.
 */
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}
 | 284 |  | 
/* Tear down the GART IOMMU; the only implementation needing explicit
 * shutdown here.  NOTE(review): caller context not visible in this
 * file — presumably invoked on reboot/halt; confirm at call sites. */
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
| Fenghua Yu | 3b15e58 | 2008-10-23 16:51:00 -0700 | [diff] [blame] | 291 |  | 
 | 292 | #ifdef CONFIG_PCI | 
 | 293 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | 
 | 294 |  | 
 | 295 | static __devinit void via_no_dac(struct pci_dev *dev) | 
 | 296 | { | 
 | 297 | 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | 
| Michael Tokarev | a0286c9 | 2008-12-05 15:47:29 +0300 | [diff] [blame] | 298 | 		printk(KERN_INFO | 
 | 299 | 			"PCI: VIA PCI bridge detected. Disabling DAC.\n"); | 
| Fenghua Yu | 3b15e58 | 2008-10-23 16:51:00 -0700 | [diff] [blame] | 300 | 		forbid_dac = 1; | 
 | 301 | 	} | 
 | 302 | } | 
 | 303 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | 
 | 304 | #endif |