/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
56
Ingo Molnar05fccb02008-01-30 13:30:12 +010057/* Allocation bitmap for the remapping area: */
Linus Torvalds1da177e2005-04-16 15:20:36 -070058static DEFINE_SPINLOCK(iommu_bitmap_lock);
Ingo Molnar05fccb02008-01-30 13:30:12 +010059/* Guarded by iommu_bitmap_lock: */
60static unsigned long *iommu_gart_bitmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070061
Ingo Molnar05fccb02008-01-30 13:30:12 +010062static u32 gart_unmapped_entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
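
/*
 * A GART PTE packs a 40-bit physical page address into 32 bits: address
 * bits 31..12 keep their place, bits 39..32 move down into PTE bits 11..4,
 * and bits 1..0 carry the coherent/valid flags.  A worked example with an
 * illustrative address:
 *
 *      GPTE_ENCODE(0x1234567000ULL)
 *        = 0x34567000 | (0x12 << 4) | 3 = 0x34567123
 *      GPTE_DECODE(0x34567123)
 *        = 0x34567000 | (0x120 << 28)   = 0x1234567000
 */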

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static int need_flush;          /* global flush state. set for each gart wrap */

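/*
 * Allocation of remapping pages is next-fit: scan forward from the
 * next_bit cursor, and retry from the start of the bitmap on failure.
 * A wrap means previously freed entries may be reused, so the GARTs
 * must be flushed before the device can see them (need_flush).  The
 * base_index/boundary_size pair keeps an allocation from straddling a
 * segment boundary the device cannot cross (dma_get_seg_boundary()).
 */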
static unsigned long alloc_iommu(struct device *dev, int size)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        /* next-fit: continue scanning from the previous allocation */
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, 0);
        if (offset == -1) {
                /* wrapped around: freed entries may be reused, so flush */
                need_flush = 1;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size, 0);
        }
        if (offset != -1) {
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        iommu_area_free(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)                                                     \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = __builtin_return_address(0);\
        } while (0)

#define CLEAR_LEAK(x)                                                   \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = NULL;                       \
        } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab)
                return;
        dump = 1;
        show_stack(NULL, NULL);

        /* Very crude. Dump some entries from the end of the table too. */
        printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
               iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i++) {
                printk(KERN_DEBUG "%lu: ", iommu_pages - i - 1);
                printk_address((unsigned long)iommu_leak_tab[iommu_pages - i - 1],
                               0);
                /* two entries per line */
                printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        if (force_iommu)
                mmu = 1;

        return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir)
{
        unsigned long npages = iommu_num_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(dev, npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        /*
         * phys_mem was advanced in whole pages, so its low bits still
         * hold the original intra-page offset.
         */
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
        dma_addr_t map = dma_map_area(dev, paddr, size, dir);

        flush_gart();

        return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
        unsigned long bus;

        if (!dev)
                dev = &fallback_dev;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = gart_map_simple(dev, paddr, size, dir);

        return bus;
}
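
/*
 * Drivers never call this directly; it is reached through the
 * dma_mapping_ops table installed at the end of gart_iommu_init().
 * Roughly (illustrative driver-side call):
 *
 *      dma_addr_t bus = dma_map_single(&pdev->dev, buf, len,
 *                                      DMA_TO_DEVICE);
 *
 * lands here and returns either the untranslated physical address
 * (when it already fits the device's DMA mask) or a bus address
 * inside the GART window.
 */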

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long chunk_pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                /* number of GART pages needed for this chunk */
                chunk_pages = iommu_num_pages(s->offset, s->length);
                while (chunk_pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}
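
/*
 * dma_map_cont() collapses the [start, start+nelems) cluster into the
 * single output entry *sout.  When no remapping is needed the cluster
 * is always a single element and is passed through untouched; otherwise
 * __dma_map_cont() allocates one run of GART pages and points every
 * chunk at it.
 */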

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &fallback_dev;

        out = 0;
        start = 0;
        start_sg = sgmap = sg;
        seg_size = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}
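
/*
 * Merging example (illustrative): two 4KB chunks, where the first ends
 * on a page boundary and the second starts at offset 0, become one 8KB
 * GART run and only sg[0] carries a dma_address/dma_length pair; a
 * chunk with an interior offset or a misaligned predecessor starts a
 * new cluster instead.
 */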

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag)
{
        void *vaddr;

        vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
        if (!vaddr)
                return NULL;

        *dma_addr = gart_map_single(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL);
        if (*dma_addr != bad_dma_address)
                return vaddr;

        free_pages((unsigned long)vaddr, get_order(size));

        return NULL;
}
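
/*
 * Reached via dma_alloc_coherent(), e.g. (illustrative):
 *
 *      void *cpu = dma_alloc_coherent(&pdev->dev, size, &bus, GFP_KERNEL);
 *
 * The pages come from the normal allocator (pre-zeroed via __GFP_ZERO)
 * and are then mapped bidirectionally through the GART, so "coherent"
 * here costs one GART mapping rather than special memory.
 */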

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                       "PCI-DMA: Warning: Small IOMMU %luMB."
                       " Consider increasing the AGP aperture in BIOS\n",
                       iommu_size >> 20);
        }

        return iommu_size;
}
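
/*
 * By default the remapping window takes the top half of the aperture
 * when AGP still needs the rest: e.g. (illustrative) a 256MB aperture
 * leaves a 128MB IOMMU, slightly reduced by the PMD_PAGE_SIZE (2MB)
 * rounding adjustment above, and anything under 64MB earns a warning.
 */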

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
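
/*
 * Aperture decode, e.g. (illustrative register values): an order of 3
 * read from bits 3:1 of AMD64_GARTAPERTURECTL gives a 32MB << 3 = 256MB
 * aperture, and AMD64_GARTAPERTUREBASE holds physical address bits
 * 39:25, hence the "& 0x7fff" and the shift by 25.  Apertures reaching
 * above 4GB are treated as invalid here.
 */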

static void enable_gart_translations(void)
{
        int i;

        for (i = 0; i < num_k8_northbridges; i++) {
                struct pci_dev *dev = k8_northbridges[i];

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
        printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

        if (fix_up_north_bridges) {
                int i;

                printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

                for (i = 0; i < num_k8_northbridges; i++) {
                        struct pci_dev *dev = k8_northbridges[i];

                        /*
                         * Don't enable translations just yet. That is the next
                         * step. Restore the pre-suspend aperture settings.
                         */
                        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
                                               aperture_order << 1);
                        pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
                                               aperture_alloc >> 25);
                }
        }

        enable_gart_translations();

        return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
        return 0;
}

static struct sysdev_class gart_sysdev_class = {
        .name = "gart",
        .suspend = gart_suspend,
        .resume = gart_resume,
};

static struct sys_device device_gart = {
        .id     = 0,
        .cls    = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i, error;
        unsigned long start_pfn, end_pfn;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

        enable_gart_translations();

        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
        if (error)
                panic("Could not register gart_sysdev -- "
                      "would corrupt data on next suspend");

        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);

        /* need to map that range */
        end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = (aper_base>>PAGE_SHIFT);
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        }
        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_WARNING "falling back to iommu=soft.\n");
        return -1;
}
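
/*
 * GATT sizing, e.g. (illustrative): a 256MB aperture covers 65536 4KB
 * pages, and at 4 bytes per GART PTE the table itself occupies 256KB
 * of uncacheable memory.
 */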

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
        .map_single                     = gart_map_single,
        .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
        .sync_single_for_cpu            = NULL,
        .sync_single_for_device         = NULL,
        .sync_single_range_for_cpu      = NULL,
        .sync_single_range_for_device   = NULL,
        .sync_sg_for_cpu                = NULL,
        .sync_sg_for_device             = NULL,
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
        .alloc_coherent                 = gart_alloc_coherent,
};

void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}
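
/*
 * Shutdown only clears the GARTEN enable bit in each northbridge's
 * aperture control register; the aperture and GATT are left in place,
 * so a subsequent kexec or reboot starts with translation disabled.
 */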

void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_size;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
                printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
                return;
        }

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                 (agp_amd64_init() < 0) ||
                 (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        printk(KERN_WARNING "More than 4GB of memory "
                               "but GART IOMMU not available.\n"
                               KERN_WARNING "falling back to iommu=soft.\n");
                }
                return;
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
                                                     get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
                else
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it safe and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}
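
/*
 * Each comma-separated token of the "iommu=" kernel parameter is also
 * passed here after the generic options are handled in pci-dma.c,
 * e.g. (illustrative command lines):
 *
 *      iommu=fullflush
 *      iommu=leak=16
 *      iommu=memaper=2
 */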