blob: 48ab52d052b644f9a005385be6c0c0931d47f8fe [file] [log] [blame]
Glauber Costa459121c92008-04-08 13:20:43 -03001#include <linux/dma-mapping.h>
Glauber Costacb5867a2008-04-08 13:20:51 -03002#include <linux/dmar.h>
Glauber Costa116890d2008-04-08 13:20:54 -03003#include <linux/bootmem.h>
Glauber Costabca5c092008-04-08 13:20:53 -03004#include <linux/pci.h>
Glauber Costacb5867a2008-04-08 13:20:51 -03005
Glauber Costa116890d2008-04-08 13:20:54 -03006#include <asm/proto.h>
7#include <asm/dma.h>
Glauber Costacb5867a2008-04-08 13:20:51 -03008#include <asm/gart.h>
9#include <asm/calgary.h>
Glauber Costa459121c92008-04-08 13:20:43 -030010
/*
 * DAC (dual-address-cycle, >32-bit PCI addressing) policy:
 *   > 0: DAC forbidden (set by the VIA bridge quirk below)
 *   < 0: DAC forbidden by the user ("iommu=nodac" / "iommu=usedac")
 *     0: DAC allowed
 * Checked in dma_supported().
 */
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

/* Active DMA mapping implementation; installed by the IOMMU init code. */
const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Force single-address-cycle (32-bit) addressing even for DAC-capable
 * devices; set via "iommu=forcesac", consumed by dma_supported(). */
int iommu_sac_force __read_mostly = 0;

/* With IOMMU debugging enabled, default to panicking on mapping
 * overflows and to routing everything through the IOMMU so problems
 * surface immediately. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Allow the IOMMU to merge scatter-gather entries ("iommu=merge"). */
int iommu_merge __read_mostly = 0;

/* "iommu=off": disable all IOMMU use. */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);
38
Glauber Costa459121c92008-04-08 13:20:43 -030039int dma_set_mask(struct device *dev, u64 mask)
40{
41 if (!dev->dma_mask || !dma_supported(dev, mask))
42 return -EIO;
43
44 *dev->dma_mask = mask;
45
46 return 0;
47}
48EXPORT_SYMBOL(dma_set_mask);
49
Glauber Costa116890d2008-04-08 13:20:54 -030050#ifdef CONFIG_X86_64
51static __initdata void *dma32_bootmem_ptr;
52static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
53
54static int __init parse_dma32_size_opt(char *p)
55{
56 if (!p)
57 return -EINVAL;
58 dma32_bootmem_size = memparse(p, &p);
59 return 0;
60}
61early_param("dma32_size", parse_dma32_size_opt);
62
/*
 * Reserve a bootmem block below 4GB early in boot so that IOMMU setup
 * can later obtain memory in the 32-bit DMA range (the reservation is
 * released again in dma32_free_bootmem(), called from pci_iommu_alloc()).
 * Only needed when memory extends beyond the 32-bit addressable range.
 */
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	/* All memory already below 4GB: nothing to protect. */
	if (end_pfn <= MAX_DMA32_PFN)
		return;

	/* 64MB alignment; round the requested size up to a whole multiple. */
	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 __pa(MAX_DMA_ADDRESS));
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;	/* allocation failed: record no reservation */
}
/*
 * Release the early DMA32 reservation back to bootmem.  Called from
 * pci_iommu_alloc() just before IOMMU detection so the IOMMUs can
 * allocate from the freed sub-4GB range.
 */
static void __init dma32_free_bootmem(void)
{
	int node;

	if (end_pfn <= MAX_DMA32_PFN)
		return;

	/* Nothing was reserved (or the reservation failed). */
	if (!dma32_bootmem_ptr)
		return;

	/* NOTE(review): the same physical range is offered to every online
	 * node; presumably free_bootmem_node() only frees the part that
	 * falls inside each node's memory -- confirm against bootmem docs. */
	for_each_online_node(node)
		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
				  dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
95
/*
 * Early boot-time IOMMU detection/allocation.  First returns the DMA32
 * bootmem reservation so the detectors can claim memory below 4GB;
 * then probes each supported IOMMU in a deliberate order.
 */
void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

	/* swiotlb acts as the software fallback when probed last. */
#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
118#endif
119
Glauber Costafae9a0d2008-04-08 13:20:56 -0300120/*
121 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
122 * documentation.
123 */
/*
 * Parse the "iommu=" kernel parameter.  Multiple options may be given
 * separated by commas.  Options are matched by PREFIX (strncmp), so
 * e.g. "forcesac" also triggers the "force" branch -- intentional.
 * The whole remaining string is also handed to gart_parse_options()
 * each iteration for GART-specific options.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			/* stop here: any options after "usedac" are ignored */
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* advance past the current option to the character after
		 * the next comma (or to the terminating NUL) */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
188
/*
 * Decide whether @dev can use DMA mask @mask.  Returns nonzero when the
 * mask is acceptable, 0 when it must be rejected.  Defers to the active
 * dma_ops implementation when it provides its own check.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	/* DAC (>32-bit) addressing may be globally forbidden, either by
	 * a quirk (via_no_dac) or by the user ("iommu=nodac"). */
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
				 dev->bus_id);
		return 0;
	}
#endif

	/* The IOMMU implementation gets the first say, if it has one. */
	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
			dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
229
230
/*
 * Final IOMMU initialization, run as an fs_initcall (registered below)
 * so it executes after the PCI subsystem is up.  Each configured IOMMU
 * is initialized in turn; no_iommu_init() runs last as the fallback.
 */
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}
246
/* Shutdown hook: of the supported IOMMUs, only the GART currently
 * needs explicit quiescing here. */
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
Glauber Costabca5c092008-04-08 13:20:53 -0300253
254#ifdef CONFIG_PCI
255/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
256
257static __devinit void via_no_dac(struct pci_dev *dev)
258{
259 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
260 printk(KERN_INFO "PCI: VIA PCI bridge detected."
261 "Disabling DAC.\n");
262 forbid_dac = 1;
263 }
264}
265DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
266#endif