blob: d2f2c0158dc12f04021122740f42a63e5c17d195 [file] [log] [blame]
Glauber Costa459121c92008-04-08 13:20:43 -03001#include <linux/dma-mapping.h>
Glauber Costacb5867a2008-04-08 13:20:51 -03002#include <linux/dmar.h>
Glauber Costa116890d2008-04-08 13:20:54 -03003#include <linux/bootmem.h>
Glauber Costabca5c092008-04-08 13:20:53 -03004#include <linux/pci.h>
Glauber Costacb5867a2008-04-08 13:20:51 -03005
Glauber Costa116890d2008-04-08 13:20:54 -03006#include <asm/proto.h>
7#include <asm/dma.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +09008#include <asm/iommu.h>
Glauber Costacb5867a2008-04-08 13:20:51 -03009#include <asm/calgary.h>
Joerg Roedela69ca342008-06-26 21:28:08 +020010#include <asm/amd_iommu.h>
Glauber Costa459121c92008-04-08 13:20:43 -030011
Jan Beulich08e1a132008-07-18 13:44:16 +010012static int forbid_dac __read_mostly;
Glauber Costabca5c092008-04-08 13:20:53 -030013
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070014struct dma_mapping_ops *dma_ops;
Glauber Costa85c246e2008-04-08 13:20:50 -030015EXPORT_SYMBOL(dma_ops);
16
Dmitri Vorobievb4cdc432008-04-28 03:15:58 +040017static int iommu_sac_force __read_mostly;
Glauber Costa8e0c3792008-04-08 13:20:55 -030018
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused.  With it enabled the GART
 * is flushed for every mapping.  Problem is that doing the lazy flush
 * seems to trigger bugs with some popular PCI cards, in particular
 * 3ware (but it has also been seen with Qlogic at least).
 */
26int iommu_fullflush;
27
Glauber Costaf9c258d2008-04-08 13:20:52 -030028#ifdef CONFIG_IOMMU_DEBUG
29int panic_on_overflow __read_mostly = 1;
30int force_iommu __read_mostly = 1;
31#else
32int panic_on_overflow __read_mostly = 0;
33int force_iommu __read_mostly = 0;
34#endif
35
Glauber Costafae9a0d2008-04-08 13:20:56 -030036int iommu_merge __read_mostly = 0;
37
38int no_iommu __read_mostly;
39/* Set this to 1 if there is a HW IOMMU in the system */
40int iommu_detected __read_mostly = 0;
41
42/* This tells the BIO block layer to assume merging. Default to off
43 because we cannot guarantee merging later. */
44int iommu_bio_merge __read_mostly = 0;
45EXPORT_SYMBOL(iommu_bio_merge);
46
Glauber Costacac67872008-04-08 13:21:00 -030047dma_addr_t bad_dma_address __read_mostly = 0;
48EXPORT_SYMBOL(bad_dma_address);
Glauber Costafae9a0d2008-04-08 13:20:56 -030049
Glauber Costa098cb7f2008-04-09 13:18:10 -030050/* Dummy device used for NULL arguments (normally ISA). Better would
51 be probably a smaller DMA mask, but this is bug-to-bug compatible
52 to older i386. */
Joerg Roedel6c505ce2008-08-19 16:32:45 +020053struct device x86_dma_fallback_dev = {
Glauber Costa098cb7f2008-04-09 13:18:10 -030054 .bus_id = "fallback device",
55 .coherent_dma_mask = DMA_32BIT_MASK,
Joerg Roedel6c505ce2008-08-19 16:32:45 +020056 .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
Glauber Costa098cb7f2008-04-09 13:18:10 -030057};
Joerg Roedel6c505ce2008-08-19 16:32:45 +020058EXPORT_SYMBOL(x86_dma_fallback_dev);
Glauber Costa098cb7f2008-04-09 13:18:10 -030059
Glauber Costa459121c92008-04-08 13:20:43 -030060int dma_set_mask(struct device *dev, u64 mask)
61{
62 if (!dev->dma_mask || !dma_supported(dev, mask))
63 return -EIO;
64
65 *dev->dma_mask = mask;
66
67 return 0;
68}
69EXPORT_SYMBOL(dma_set_mask);
70
Glauber Costa116890d2008-04-08 13:20:54 -030071#ifdef CONFIG_X86_64
72static __initdata void *dma32_bootmem_ptr;
73static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
74
75static int __init parse_dma32_size_opt(char *p)
76{
77 if (!p)
78 return -EINVAL;
79 dma32_bootmem_size = memparse(p, &p);
80 return 0;
81}
82early_param("dma32_size", parse_dma32_size_opt);
83
84void __init dma32_reserve_bootmem(void)
85{
86 unsigned long size, align;
Yinghai Luc987d122008-06-24 22:14:09 -070087 if (max_pfn <= MAX_DMA32_PFN)
Glauber Costa116890d2008-04-08 13:20:54 -030088 return;
89
Yinghai Lu7677b2e2008-04-14 20:40:37 -070090 /*
91 * check aperture_64.c allocate_aperture() for reason about
92 * using 512M as goal
93 */
Glauber Costa116890d2008-04-08 13:20:54 -030094 align = 64ULL<<20;
95 size = round_up(dma32_bootmem_size, align);
96 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
Yinghai Lu7677b2e2008-04-14 20:40:37 -070097 512ULL<<20);
Glauber Costa116890d2008-04-08 13:20:54 -030098 if (dma32_bootmem_ptr)
99 dma32_bootmem_size = size;
100 else
101 dma32_bootmem_size = 0;
102}
103static void __init dma32_free_bootmem(void)
104{
Glauber Costa116890d2008-04-08 13:20:54 -0300105
Yinghai Luc987d122008-06-24 22:14:09 -0700106 if (max_pfn <= MAX_DMA32_PFN)
Glauber Costa116890d2008-04-08 13:20:54 -0300107 return;
108
109 if (!dma32_bootmem_ptr)
110 return;
111
Yinghai Lu330fce22008-04-19 01:31:45 -0700112 free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
Glauber Costa116890d2008-04-08 13:20:54 -0300113
114 dma32_bootmem_ptr = NULL;
115 dma32_bootmem_size = 0;
116}
117
118void __init pci_iommu_alloc(void)
119{
120 /* free the range so iommu could get some range less than 4G */
121 dma32_free_bootmem();
122 /*
123 * The order of these functions is important for
124 * fall-back/fail-over reasons
125 */
Glauber Costa116890d2008-04-08 13:20:54 -0300126 gart_iommu_hole_init();
Glauber Costa116890d2008-04-08 13:20:54 -0300127
Glauber Costa116890d2008-04-08 13:20:54 -0300128 detect_calgary();
Glauber Costa116890d2008-04-08 13:20:54 -0300129
130 detect_intel_iommu();
131
Joerg Roedela69ca342008-06-26 21:28:08 +0200132 amd_iommu_detect();
133
Glauber Costa116890d2008-04-08 13:20:54 -0300134 pci_swiotlb_init();
Glauber Costa116890d2008-04-08 13:20:54 -0300135}
FUJITA Tomonori8978b742008-07-29 13:38:53 +0900136
137unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
138{
139 unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
140
141 return size >> PAGE_SHIFT;
142}
143EXPORT_SYMBOL(iommu_num_pages);
Glauber Costa116890d2008-04-08 13:20:54 -0300144#endif
145
Glauber Costafae9a0d2008-04-08 13:20:56 -0300146/*
147 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
148 * documentation.
149 */
150static __init int iommu_setup(char *p)
151{
152 iommu_merge = 1;
153
154 if (!p)
155 return -EINVAL;
156
157 while (*p) {
158 if (!strncmp(p, "off", 3))
159 no_iommu = 1;
160 /* gart_parse_options has more force support */
161 if (!strncmp(p, "force", 5))
162 force_iommu = 1;
163 if (!strncmp(p, "noforce", 7)) {
164 iommu_merge = 0;
165 force_iommu = 0;
166 }
167
168 if (!strncmp(p, "biomerge", 8)) {
169 iommu_bio_merge = 4096;
170 iommu_merge = 1;
171 force_iommu = 1;
172 }
173 if (!strncmp(p, "panic", 5))
174 panic_on_overflow = 1;
175 if (!strncmp(p, "nopanic", 7))
176 panic_on_overflow = 0;
177 if (!strncmp(p, "merge", 5)) {
178 iommu_merge = 1;
179 force_iommu = 1;
180 }
181 if (!strncmp(p, "nomerge", 7))
182 iommu_merge = 0;
Joerg Roedel2842e5b2008-09-18 15:23:43 +0200183 if (!strncmp(p, "fullflush", 8))
184 iommu_fullflush = 1;
185 if (!strncmp(p, "nofullflush", 11))
186 iommu_fullflush = 0;
Glauber Costafae9a0d2008-04-08 13:20:56 -0300187 if (!strncmp(p, "forcesac", 8))
188 iommu_sac_force = 1;
189 if (!strncmp(p, "allowdac", 8))
190 forbid_dac = 0;
191 if (!strncmp(p, "nodac", 5))
192 forbid_dac = -1;
193 if (!strncmp(p, "usedac", 6)) {
194 forbid_dac = -1;
195 return 1;
196 }
197#ifdef CONFIG_SWIOTLB
198 if (!strncmp(p, "soft", 4))
199 swiotlb = 1;
200#endif
201
Glauber Costafae9a0d2008-04-08 13:20:56 -0300202 gart_parse_options(p);
Glauber Costafae9a0d2008-04-08 13:20:56 -0300203
204#ifdef CONFIG_CALGARY_IOMMU
205 if (!strncmp(p, "calgary", 7))
206 use_calgary = 1;
207#endif /* CONFIG_CALGARY_IOMMU */
208
209 p += strcspn(p, ",");
210 if (*p == ',')
211 ++p;
212 }
213 return 0;
214}
215early_param("iommu", iommu_setup);
216
Glauber Costa8e0c3792008-04-08 13:20:55 -0300217int dma_supported(struct device *dev, u64 mask)
218{
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700219 struct dma_mapping_ops *ops = get_dma_ops(dev);
220
Glauber Costa8e0c3792008-04-08 13:20:55 -0300221#ifdef CONFIG_PCI
222 if (mask > 0xffffffff && forbid_dac > 0) {
Greg Kroah-Hartmanfc3a8822008-05-02 06:02:41 +0200223 dev_info(dev, "PCI: Disallowing DAC for device\n");
Glauber Costa8e0c3792008-04-08 13:20:55 -0300224 return 0;
225 }
226#endif
227
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700228 if (ops->dma_supported)
229 return ops->dma_supported(dev, mask);
Glauber Costa8e0c3792008-04-08 13:20:55 -0300230
231 /* Copied from i386. Doesn't make much sense, because it will
232 only work for pci_alloc_coherent.
233 The caller just has to use GFP_DMA in this case. */
234 if (mask < DMA_24BIT_MASK)
235 return 0;
236
237 /* Tell the device to use SAC when IOMMU force is on. This
238 allows the driver to use cheaper accesses in some cases.
239
240 Problem with this is that if we overflow the IOMMU area and
241 return DAC as fallback address the device may not handle it
242 correctly.
243
244 As a special case some controllers have a 39bit address
245 mode that is as efficient as 32bit (aic79xx). Don't force
246 SAC for these. Assume all masks <= 40 bits are of this
247 type. Normally this doesn't make any difference, but gives
248 more gentle handling of IOMMU overflow. */
249 if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
Greg Kroah-Hartmanfc3a8822008-05-02 06:02:41 +0200250 dev_info(dev, "Force SAC with mask %Lx\n", mask);
Glauber Costa8e0c3792008-04-08 13:20:55 -0300251 return 0;
252 }
253
254 return 1;
255}
256EXPORT_SYMBOL(dma_supported);
257
Glauber Costacb5867a2008-04-08 13:20:51 -0300258static int __init pci_iommu_init(void)
259{
Glauber Costacb5867a2008-04-08 13:20:51 -0300260 calgary_iommu_init();
Glauber Costa459121c92008-04-08 13:20:43 -0300261
Glauber Costacb5867a2008-04-08 13:20:51 -0300262 intel_iommu_init();
263
Joerg Roedela69ca342008-06-26 21:28:08 +0200264 amd_iommu_init();
265
Glauber Costacb5867a2008-04-08 13:20:51 -0300266 gart_iommu_init();
Glauber Costacb5867a2008-04-08 13:20:51 -0300267
268 no_iommu_init();
269 return 0;
270}
271
/* Quiesce IOMMU hardware before reboot/kexec; only GART needs it. */
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
Glauber Costabca5c092008-04-08 13:20:53 -0300278
279#ifdef CONFIG_PCI
280/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
281
282static __devinit void via_no_dac(struct pci_dev *dev)
283{
284 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
285 printk(KERN_INFO "PCI: VIA PCI bridge detected."
286 "Disabling DAC.\n");
287 forbid_dac = 1;
288 }
289}
290DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
291#endif