#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

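/*
 * Force single-address-cycle (32-bit) addressing even for devices that
 * could handle dual-address-cycle; consulted by dma_supported() below.
 */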
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this keeps bug-for-bug
   compatibility with older i386. */
struct device x86_dma_fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

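/*
 * Update the device's streaming DMA mask, but only after verifying via
 * dma_supported() that the device can actually address that range;
 * otherwise the mask is left untouched and -EIO is returned.
 */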
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

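/*
 * On 64-bit, keep a chunk of boot memory below 4G in reserve so that
 * the IOMMU setup code can still get DMA32-addressable memory later,
 * even on machines where the bootmem allocator would otherwise hand
 * out high addresses.
 */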
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

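/*
 * Try to reserve dma32_bootmem_size bytes below 4G. Nothing is done
 * when all memory already sits below MAX_DMA32_PFN, and an allocation
 * failure is non-fatal (hence the _nopanic variant).
 */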
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * See allocate_aperture() in aperture_64.c for why 512M is
         * used as the goal here.
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                                    512ULL<<20);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
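/* Give the reserved chunk back; called from pci_iommu_alloc() below. */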
static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

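/*
 * Probe for hardware IOMMUs. The order matters: the first detector to
 * claim the machine wins (GART aperture, Calgary, Intel VT-d, AMD
 * IOMMU), and swiotlb is set up last as the software fallback.
 */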
void __init pci_iommu_alloc(void)
{
        /* free the reserved range so the IOMMU can allocate below 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}

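/*
 * Number of pages touched by the byte range [addr, addr + len): the
 * offset into the first page plus len, rounded up to a whole number of
 * pages. E.g. with 4K pages, addr == 0x1fff and len == 2 straddles a
 * page boundary and therefore counts as two pages.
 */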
unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
        unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

        return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_nr_pages);
#endif

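/*
 * Generic coherent allocator: grab zeroed pages on the device's node
 * and check that their physical address fits the device's coherent
 * DMA mask. If it does not, retry once from ZONE_DMA for devices with
 * a mask narrower than 32 bits before giving up.
 */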
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (!is_buffer_dma_capable(dma_mask, addr, size)) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
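/*
 * The argument is a comma-separated list, e.g. booting with
 * "iommu=force,nomerge,nopanic" forces use of the IOMMU for all
 * devices while disabling scatter-gather merging and panics on
 * mapping overflow.
 */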
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

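/*
 * Check whether @dev can address all of @mask. DAC (64-bit) addressing
 * may have been administratively forbidden via "iommu=nodac"; beyond
 * that, the per-device dma_ops get the first say if they provide their
 * own check.
 */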
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39-bit address
           mode that is as efficient as 32-bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

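/*
 * Late initialization, run after the PCI subsystem is up (see the
 * fs_initcall below). Each initializer is expected to return early
 * when its hardware was not detected in pci_iommu_alloc(), leaving
 * no_iommu_init() to install the fallback nommu operations.
 */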
static int __init pci_iommu_init(void)
{
        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);