/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is enabled the GART
 * is flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with at least Qlogic).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

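/*
 * PTE value written into unmapped GART slots; it points at a scratch
 * page rather than 0, see the prefetch workaround in gart_iommu_init().
 */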
static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
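/*
 * A GART PTE is 32 bits: bits 31:12 hold physical address bits 31:12,
 * bits 11:4 hold physical address bits 39:32, and bits 1:0 are the
 * coherent/valid flags. Illustrative example: for a page at physical
 * address 0x1_2345_6000, GPTE_ENCODE() yields 0x23456013
 * (0x23456000 | (0x1 << 4) | GPTE_COHERENT | GPTE_VALID), and
 * GPTE_DECODE(0x23456013) recovers 0x1_2345_6000.
 */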

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

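/*
 * Allocate a run of 'size' contiguous pages in the GART remapping area.
 * The search is bounded by the device's DMA mask and segment boundary
 * and wraps back to the start (scheduling a flush) when the space past
 * next_bit is exhausted. Returns the first page index, or -1 on failure.
 */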
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask, u64 dma_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;
	unsigned long limit;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	limit = iommu_device_max_index(iommu_pages,
				       DIV_ROUND_UP(iommu_bus_base, PAGE_SIZE),
				       dma_mask >> PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);

	if (limit <= next_bit) {
		need_flush = 1;
		next_bit = 0;
	}

	offset = iommu_area_alloc(iommu_gart_bitmap, limit, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1 && next_bit) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, limit, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu ||
		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
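/*
 * Illustrative example (assuming 4K pages): mapping size 0x1000 at
 * physical address 0x1_2345_6789 spans two pages, so two GART entries
 * are programmed, and the returned bus address is
 * iommu_bus_base + iommu_page * PAGE_SIZE + 0x789, i.e. the offset
 * within the page is preserved.
 */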
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask,
				u64 dma_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page;
	int i;

	iommu_page = alloc_iommu(dev, npages, align_mask, dma_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0, dma_get_mask(dev));
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;
	u64 dma_mask = dma_get_mask(dev);

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0,
					    dma_mask);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start;
	unsigned long iommu_page;
	struct scatterlist *s;
	int i;

	iommu_start = alloc_iommu(dev, pages, 0, dma_get_mask(dev));
	if (iommu_start == -1)
		return -1;

	iommu_page = iommu_start;
	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
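/*
 * Illustrative example: with iommu_merge enabled, three 4K entries that
 * each start at offset 0 and end on a page boundary are merged into a
 * single 12K GART mapping reported in one output entry, provided the
 * merged length stays within dma_get_max_seg_size(dev).
 */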
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not-yet-processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	void *vaddr;
	unsigned long align_mask;
	u64 dma_mask = dma_get_mask(dev);

	vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
	if (!vaddr)
		return NULL;

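	/*
	 * Force the GART mapping to be naturally aligned to the
	 * allocation's order, presumably so the bus address keeps the
	 * same alignment that __get_free_pages() gave the CPU address.
	 */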
	align_mask = (1UL << get_order(size)) - 1;

	*dma_addr = dma_map_area(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL,
				 align_mask, dma_mask);
	flush_gart();

	if (*dma_addr != bad_dma_address)
		return vaddr;

	free_pages((unsigned long)vaddr, get_order(size));

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB."
		       " Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
					       aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
					       aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,
};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

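/*
 * The dma_mapping_ops vtable that gart_iommu_init() installs as the
 * global dma_ops; the sync hooks are left NULL and thus act as no-ops.
 * Drivers reach these entry points through the generic DMA API, e.g.:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...perform the DMA...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */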
static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

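/*
 * Parse GART-specific options from the kernel command line; p points at
 * the option substring (typically the tail of the iommu= parameter),
 * e.g. "fullflush", "noaperture", "memaper=2", or "leak[=pages]" when
 * CONFIG_IOMMU_LEAK is enabled.
 */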
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}