/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This makes it possible to use PCI devices that only support 32bit
 * addresses on systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. When it is set the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;

#define MAX_NB 8

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
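
/*
 * Worked example (editor's note, derived from the macros above): a GATT
 * entry packs a 40bit physical address into 32 bits.  For
 * x = 0x1234567000:
 *   (x) & 0xfffff000	= 0x34567000  (address bits 12-31)
 *   ((x) >> 32) << 4	= 0x00000120  (address bits 32-39 land in bits 4-11)
 *   GPTE_ENCODE(x)	= 0x34567123  (with VALID|COHERENT in bits 0-1)
 * GPTE_DECODE reverses this: 0x34567000 | (0x120 << 28) = 0x1234567000.
 */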

#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
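
/*
 * Worked example (editor's note): to_pages(0x1234, 0x2000) rounds
 * 0x234 + 0x2000 up to 0x3000 and shifts by PAGE_SHIFT, i.e. an 8KB
 * buffer that straddles page boundaries needs 3 GART pages.
 */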

#define for_all_nb(dev) \
	dev = NULL;	\
	while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
		if (dev->bus->number == 0 &&				\
		    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))

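/*
 * Editor's note: device ID 0x1103 is the K8 northbridge's miscellaneous
 * control function (function 3), which appears once per node on bus 0
 * in slots 24-31, so the macro above iterates over all nodes.
 */
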
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

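/*
 * Editor's note on the allocation strategy: alloc_iommu() is a next-fit
 * search that resumes at next_bit, so freed slots are not reused until
 * the cursor wraps around -- and every wrap sets need_flush.  This is
 * what makes the lazy-flush mode safe: a GART TLB entry can only go
 * stale for a slot that is not handed out again before the next flush.
 */
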
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;
	if (size == 1) {
		clear_bit(offset, iommu_gart_bitmap);
		return;
	}
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
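/*
 * Editor's note (hedged, from how the code below uses it): offset 0x9c
 * appears to be the northbridge's GART cache control word.  Writing it
 * back with bit 0 set asks the hardware to invalidate its GART TLB; the
 * hardware clears bit 0 when the invalidation is done, which is what
 * the read-back loop waits for.
 */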
static void flush_gart(struct device *dev)
{
	unsigned long flags;
	int flushed = 0;
	int i, max;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		max = 0;
		for (i = 0; i < MAX_NB; i++) {
			if (!northbridges[i])
				continue;
			pci_write_config_dword(northbridges[i], 0x9c,
					       northbridge_flush_word[i] | 1);
			flushed++;
			max = i;
		}
		for (i = 0; i <= max; i++) {
			u32 w;
			if (!northbridges[i])
				continue;
			/* Make sure the hardware actually executed the flush. */
			do {
				pci_read_config_dword(northbridges[i], 0x9c, &w);
			} while (w & 1);
		}
		if (!flushed)
			printk("nothing to flush?\n");
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;
void dump_leak(void)
{
	int i;
	static int dump;
	if (dump || !iommu_leak_tab) return;
	dump = 1;
	show_stack(NULL,NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i++) {
		printk("%lu: ", iommu_pages-i-1);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i-1]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');	/* two per line */
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this failure properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;
	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;
	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	/* The in-page offset survives the PAGE_SIZE advances above. */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
	flush_gart(dev);
	return map;
}

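/*
 * Editor's note: mapping is a two-level decision.  If the buffer's
 * physical address already fits below the device's dma_mask (and
 * force_iommu is off), the physical address is returned unchanged and
 * the GART is never touched; only buffers the device cannot reach get
 * remapped through the aperture.
 */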
/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == DMA_NONE);

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart(dev);
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __dma_map_cont(sg, start, stopat, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
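/*
 * Editor's note with a worked example: three 4KB entries with zero
 * in-page offsets merge into one 12KB window of consecutive GART pages,
 * so the device sees a single dma_address/dma_length pair.  An entry
 * with a nonzero offset, or a predecessor that does not end on a page
 * boundary, starts a new mapping instead (the condition in the loop).
 */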
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == DMA_NONE);
	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart(dev);
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart(NULL);
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}

/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
		       size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;
	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size>>20);

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

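/*
 * Editor's note with a worked example: with config register 0x94
 * reading 0x40 and 0x90 reading 0x6, the aperture order is
 * (0x6 >> 1) & 7 = 3, so aper_size = 32MB << 3 = 256MB, and
 * aper_base = (0x40 & 0x7fff) << 25 = 0x80000000, i.e. a 256MB
 * aperture at the 2GB mark.
 */
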
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
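/*
 * Editor's note (hedged, going by how the code below uses the
 * registers): 0x98 takes the GATT's physical address, pre-shifted into
 * the table-base format, while 0x90 is the aperture control word, where
 * setting bit 0 enables GART translation and clearing bits 4 and 5
 * allows both CPU and IO accesses to be translated.
 */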
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	for_all_nb(dev) {
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for_all_nb(dev) {
		u32 ctl;
		u32 gatt_reg;

		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart(NULL);

	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

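/*
 * Editor's note: gart_dma_ops below plugs the GART implementations into
 * the x86-64 dma_mapping_ops dispatch table that the generic dma_*
 * wrappers call through.  The NULL sync hooks mean no extra work is
 * needed there: GART mappings are set up cache-coherent (GPTE_COHERENT)
 * and the translation is static between map and unmap.
 */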
static struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

static int __init pci_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	struct pci_dev *dev;
	unsigned long scratch;
	long i;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return -1;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
					"but IOMMU not compiled in.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n"
			       KERN_ERR "You might want to enable "
					"CONFIG_GART_IOMMU\n");
		}
		return -1;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
						    get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	for_all_nb(dev) {
		u32 flag;
		int cpu = PCI_SLOT(dev->devfn) - 24;
		if (cpu >= MAX_NB)
			continue;
		northbridges[cpu] = dev;
		pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
		northbridge_flush_word[cpu] = flag;
	}

	flush_gart(NULL);

	dma_ops = &gart_dma_ops;

	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

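/*
 * Editor's note (hedged): gart_parse_options() below consumes the
 * GART-specific part of the iommu= boot option.  Going by the string
 * matches in the code, plausible invocations look like iommu=fullflush,
 * iommu=nofullflush, iommu=noagp, iommu=noaperture, iommu=memaper=2,
 * iommu=leak=20, or a bare number to set the IOMMU size.
 */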
void gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p,"leak",4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush",9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush",11))
		iommu_fullflush = 0;
	if (!strncmp(p,"noagp",5))
		no_agp = 1;
	if (!strncmp(p, "noaperture",10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p,"force",5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p,"allowed",7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}