/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32-bit addresses on systems
 * with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it set the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
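/*
 * Worked example (illustrative, not from the original source): for a
 * physical address of 0x123456000, GPTE_ENCODE yields
 * 0x23456000 | (0x1 << 4) | GPTE_VALID | GPTE_COHERENT = 0x23456013:
 * address bits 31:12 stay in place and bits 39:32 land in PTE bits
 * 11:4. GPTE_DECODE recovers 0x123456000 from that value.
 */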

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
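/*
 * Worked example (illustrative): to_pages(0x1234, 0x2000) computes
 * round_up(0x234 + 0x2000, 0x1000) >> 12 = 3, since a 0x2000-byte
 * buffer starting 0x234 bytes into a page straddles three 4K pages.
 */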

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

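/*
 * Next-fit search through the allocation bitmap: start at next_bit and
 * wrap to 0 on failure. A wrap (or a restart from 0) forces a GART
 * flush before the range is reused, since stale translations for the
 * reclaimed pages may still be cached.
 */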
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0, iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages - i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages - i]);
		printk("%c", (i + 1) % 2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;

	return mmu;
}

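/*
 * Note: dma_map_area() below rechecks with nonforced_iommu() on
 * allocation failure, so a mapping that was only forced through the
 * IOMMU can still fall back to the untranslated physical address when
 * the device could reach it anyway.
 */
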
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();
	return map;
}

/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == DMA_NONE);

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}

/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
		       size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];

		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long npages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		npages = to_pages(s->offset, s->length);
		while (npages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __dma_map_cont(sg, start, stopat, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == DMA_NONE);
	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous, not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);

	return iommu_size;
}

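/*
 * Read the aperture base and size from northbridge config space.
 * As decoded below, register 0x90 carries the aperture order in bits
 * 3:1 (size = 32MB << order) and register 0x94 carries aperture base
 * address bits 39:25 in its low 15 bits.
 */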
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

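	/*
	 * Program each northbridge with the shared GATT: register 0x98
	 * takes physical address bits 39:12 of the table in bits 31:4,
	 * and bit 0 of register 0x90 enables translation (bits 4 and 5
	 * are cleared below).
	 */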
	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;
		u32 gatt_reg;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
			       "but IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk(KERN_ERR "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start >> PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

/* Must execute after PCI subsystem */
fs_initcall(gart_iommu_init);

void gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}