#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

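/*
 * The active set of DMA operations for the platform.  Whichever IOMMU
 * implementation ends up driving the machine (GART, Calgary, VT-d,
 * AMD IOMMU, swiotlb or nommu) installs its own ops here during init.
 */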
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Defaults to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller
   DMA mask would probably be better, but this is bug-for-bug
   compatible with older i386. */
struct device x86_dma_fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

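/*
 * Update the device's DMA mask, provided the device has a mask at all
 * and the platform can actually support the requested one.
 */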
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
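/*
 * Memory below 4G is reserved early in boot and released again in
 * pci_iommu_alloc(), so that the IOMMU code can still find a large
 * contiguous range under 4G after the bootmem allocator has been
 * handing out memory for a while.  The default reservation is 128M
 * and can be changed with the "dma32_size=" kernel parameter.
 */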
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for the reason
	 * 512M is used as the allocation goal.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}

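/*
 * Called early in boot: releases the dma32 bootmem reservation and
 * gives each supported IOMMU a chance to detect itself (and, for the
 * GART, to set up its aperture) ahead of the real initialization in
 * pci_iommu_init().
 */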
void __init pci_iommu_alloc(void)
{
	/* Free the range so the IOMMU can allocate a range below 4G. */
	dma32_free_bootmem();

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons.
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}

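/*
 * Number of pages spanned by the buffer [addr, addr + len), taking
 * the offset of addr within its first page into account.
 */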
unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

	return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_nr_pages);
#endif

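/*
 * Generic coherent allocator: grab zeroed pages on the device's node
 * and verify that they fall within the device's coherent DMA mask.
 * If the allocation lands too high and the mask is below 4G, retry
 * once from ZONE_DMA before giving up.
 */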
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
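/*
 * For example, booting with "iommu=force,biomerge" forces IOMMU use
 * for all devices and turns on BIO merging, while "iommu=off" disables
 * the IOMMU entirely (see the option parsing below).
 */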
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options() handles more "force" variants */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

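/*
 * Check whether the device can address memory described by the given
 * mask.  Bus-level restrictions (such as the PCI DAC policy) are
 * applied first; after that the decision is delegated to the active
 * dma_mapping_ops if they provide their own dma_supported hook.
 */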
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return DAC as a fallback address the device may not
	   handle it correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

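/*
 * Late initialization: each *_iommu_init() call only takes effect if
 * the corresponding hardware was detected in pci_iommu_alloc(), and
 * no_iommu_init() installs the fallback nommu operations if nothing
 * else has claimed dma_ops.
 */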
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

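/*
 * Only the GART needs an explicit shutdown; the other IOMMUs are left
 * alone here.
 */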
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);