/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>

int iommu_merge __read_mostly = 0;

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

static int iommu_sac_force __read_mostly = 0;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
static noinline void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;

	node = dev_to_node(dev);
	if (node == -1)
		node = numa_node_id();

	if (node < first_node(node_online_map))
		node = first_node(node_online_map);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA-capable */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;

 again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;
		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, memory, size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
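
/*
 * Illustrative driver usage (a sketch only, not part of this file;
 * "mydev" and the 4096-byte buffer size are made-up examples):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&mydev->dev, 4096, &dma_handle,
 *				      GFP_KERNEL);
 *	if (cpu_addr == NULL)
 *		return -ENOMEM;
 *	// ... hand dma_handle to the device, access cpu_addr from the CPU ...
 *	dma_free_coherent(&mydev->dev, 4096, cpu_addr, dma_handle);
 */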

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);

static int forbid_dac __read_mostly;

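/* Check whether a device can address all memory covered by @mask.
   Defers to the active dma_ops implementation when it provides its
   own check. */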
| Muli Ben-Yehuda | 17a941d | 2006-01-11 22:44:42 +0100 | [diff] [blame] | 177 | int dma_supported(struct device *dev, u64 mask) | 
 | 178 | { | 
| Andi Kleen | ece6684 | 2006-09-30 01:47:55 +0200 | [diff] [blame] | 179 | #ifdef CONFIG_PCI | 
 | 180 | 	if (mask > 0xffffffff && forbid_dac > 0) { | 
 | 181 |  | 
 | 182 |  | 
 | 183 |  | 
 | 184 | 		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n", dev->bus_id); | 
 | 185 | 		return 0; | 
 | 186 | 	} | 
 | 187 | #endif | 
 | 188 |  | 
| Muli Ben-Yehuda | 17a941d | 2006-01-11 22:44:42 +0100 | [diff] [blame] | 189 | 	if (dma_ops->dma_supported) | 
 | 190 | 		return dma_ops->dma_supported(dev, mask); | 
 | 191 |  | 
 | 192 | 	/* Copied from i386. Doesn't make much sense, because it will | 
 | 193 | 	   only work for pci_alloc_coherent. | 
 | 194 | 	   The caller just has to use GFP_DMA in this case. */ | 
| Jon Mason | 9f2036f | 2006-06-26 13:56:19 +0200 | [diff] [blame] | 195 |         if (mask < DMA_24BIT_MASK) | 
| Muli Ben-Yehuda | 17a941d | 2006-01-11 22:44:42 +0100 | [diff] [blame] | 196 |                 return 0; | 
 | 197 |  | 
 | 198 | 	/* Tell the device to use SAC when IOMMU force is on.  This | 
 | 199 | 	   allows the driver to use cheaper accesses in some cases. | 
 | 200 |  | 
 | 201 | 	   Problem with this is that if we overflow the IOMMU area and | 
 | 202 | 	   return DAC as fallback address the device may not handle it | 
 | 203 | 	   correctly. | 
 | 204 |  | 
 | 205 | 	   As a special case some controllers have a 39bit address | 
 | 206 | 	   mode that is as efficient as 32bit (aic79xx). Don't force | 
 | 207 | 	   SAC for these.  Assume all masks <= 40 bits are of this | 
 | 208 | 	   type. Normally this doesn't make any difference, but gives | 
 | 209 | 	   more gentle handling of IOMMU overflow. */ | 
| Jon Mason | 9f2036f | 2006-06-26 13:56:19 +0200 | [diff] [blame] | 210 | 	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { | 
| Muli Ben-Yehuda | 17a941d | 2006-01-11 22:44:42 +0100 | [diff] [blame] | 211 | 		printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask); | 
 | 212 | 		return 0; | 
 | 213 | 	} | 
 | 214 |  | 
 | 215 | 	return 1; | 
 | 216 | } | 
 | 217 | EXPORT_SYMBOL(dma_supported); | 
 | 218 |  | 
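/* Validate @mask with dma_supported() and, if acceptable, make it the
   device's DMA mask.  Returns -EIO when the mask cannot be used. */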
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
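
/*
 * Illustrative call from a driver probe routine (a sketch only, not
 * part of this file; "pdev" is a hypothetical struct pci_dev):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_39BIT_MASK) &&
 *	    dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;	(no usable mask; give up)
 */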

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;

#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
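
/*
 * Example (illustrative only): booting with "iommu=force,merge,panic"
 * forces all DMA through the IOMMU, enables merging and panics on
 * IOMMU overflow; "iommu=off" disables IOMMU use entirely.  Options
 * are comma-separated, as parsed above.
 */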

void __init pci_iommu_alloc(void)
{
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

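/* Late initialization: give each detected IOMMU implementation a
   chance to set up, falling back to the nommu path if none takes
   over. */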
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);