#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

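/* >0: DAC forbidden, 0: default policy, <0: DAC forced on by "iommu=usedac" */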
static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This tells the BIO block layer to assume merging. Default to off
 * because we cannot guarantee merging later.
 */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/*
 * Dummy device used for NULL arguments (normally ISA).  A smaller DMA
 * mask would probably be better, but this is bug-to-bug compatible
 * with older i386.
 */
struct device x86_dma_fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

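/*
 * Store a new DMA mask for @dev after verifying that the platform can
 * actually support it; fails with -EIO otherwise.
 */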
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

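/*
 * Reserve a chunk of bootmem below 4G early during boot.  It is freed
 * again in pci_iommu_alloc() so that the IOMMU setup code is guaranteed
 * to find some usable memory below 4G.
 */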
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for the reason why
	 * 512M is used as the allocation goal here.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
						    512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}

95static void __init dma32_free_bootmem(void)
96{
Glauber Costa116890d2008-04-08 13:20:54 -030097
Yinghai Luc987d122008-06-24 22:14:09 -070098 if (max_pfn <= MAX_DMA32_PFN)
Glauber Costa116890d2008-04-08 13:20:54 -030099 return;
100
101 if (!dma32_bootmem_ptr)
102 return;
103
Yinghai Lu330fce22008-04-19 01:31:45 -0700104 free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
Glauber Costa116890d2008-04-08 13:20:54 -0300105
106 dma32_bootmem_ptr = NULL;
107 dma32_bootmem_size = 0;
108}
109
void __init pci_iommu_alloc(void)
{
	/* free the range so the IOMMU can get some memory below 4G */
	dma32_free_bootmem();

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}

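/*
 * Number of pages spanned by the buffer [addr, addr + len), taking the
 * offset of the buffer within its first page into account.
 */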
unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

	return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_nr_pages);
#endif

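/*
 * Generic coherent allocation: allocate zeroed pages on the device's
 * node and, if they land above the device's coherent DMA mask, retry
 * once with GFP_DMA before giving up.
 *
 * Minimal usage sketch: this is meant to be wired up as the
 * ->alloc_coherent hook of a dma_mapping_ops backend (pci-nommu.c is
 * one in-tree user):
 *
 *	dma_addr_t bus;
 *	void *vaddr = dma_generic_alloc_coherent(dev, size, &bus, GFP_KERNEL);
 */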
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
210 if (!strncmp(p, "nodac", 5))
211 forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

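/*
 * Check whether @dev can handle DMA to addresses described by @mask,
 * taking forbid_dac and any ->dma_supported hook of the active
 * dma_mapping_ops into account.
 */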
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386.  Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.  The caller just has to use
	 * GFP_DMA in this case.
	 */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * Problem with this is that if we overflow the IOMMU area and
	 * return DAC as fallback address the device may not handle it
	 * correctly.
	 *
	 * As a special case some controllers have a 39bit address mode
	 * that is as efficient as 32bit (aic79xx).  Don't force SAC for
	 * these.  Assume all masks <= 40 bits are of this type.  Normally
	 * this doesn't make any difference, but gives more gentle handling
	 * of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

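/*
 * Late IOMMU setup: each *_init() call is expected to back off when its
 * hardware was not detected earlier, so the call order doubles as the
 * fall-back order; no_iommu_init() installs the nommu ops last if
 * nothing else claimed dma_ops.
 */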
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO
		       "PCI: VIA PCI bridge detected. Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif