/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32-bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area in bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
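
/*
 * Worked example of the encoding above: for physical address
 * 0x1_2345_6000, GPTE_ENCODE() keeps the low page-frame bits in place
 * (0x2345_6000), folds address bits 39:32 (here 0x1) into PTE bits
 * 11:4 (0x10) and ORs in VALID|COHERENT, giving the PTE 0x2345_6013.
 * GPTE_DECODE() reverses this: 0x2345_6013 -> 0x1_2345_6000.
 */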

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;         /* global flush state; set on each GART wrap */

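/*
 * Find a free run of 'size' GART pages with a next-fit search of the
 * allocation bitmap, honoring the device's segment-boundary mask. If
 * the search from next_bit fails, retry once from the start of the
 * aperture; such a wraparound (like every allocation in fullflush
 * mode) requests a GART TLB flush via need_flush.
 */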
static unsigned long alloc_iommu(struct device *dev, int size,
                                 unsigned long align_mask)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = true;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size,
                                          align_mask);
        }
        if (offset != -1) {
                next_bit = offset + size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = true;
                }
        }
        if (iommu_fullflush)
                need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

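/*
 * Release a range of GART pages. Bumping next_bit past a freed range
 * that lies ahead of it keeps the lazy-flush path from handing those
 * entries out again before the allocator wraps around (and thereby
 * flushes).
 */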
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        iommu_area_free(iommu_gart_bitmap, offset, size);
        if (offset >= next_bit)
                next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)                                                     \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = __builtin_return_address(0);\
        } while (0)

#define CLEAR_LEAK(x)                                                   \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = NULL;                       \
        } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab)
                return;
        dump = 1;
        show_stack(NULL, NULL);

        /* Very crude: dump some entries from the end of the table too */
        printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
               iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i += 2) {
                printk(KERN_DEBUG "%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
                               0);
                printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped, prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space,
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR
                                "PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return force_iommu ||
                !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the IOMMU is needed and flush.
 */
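/*
 * Note: on allocation failure the untranslated address is returned if
 * the device could reach the buffer anyway; otherwise the caller gets
 * bad_dma_address. The in-page offset folded into the returned bus
 * address is unchanged by the PAGE_SIZE increments in the fill loop.
 */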
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir, unsigned long align_mask)
{
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        unsigned long bus;
        phys_addr_t paddr = page_to_phys(page) + offset;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = dma_map_area(dev, paddr, size, dir, 0);
        flush_gart();

        return bus;
}

static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
                                  size_t size, int dir)
{
        return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
                             paddr & ~PAGE_MASK, size, dir, NULL);
}

/*
 * Free a DMA mapping.
 */
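/*
 * Addresses below the emergency window or beyond the remapping area
 * were handed out untranslated, so there are no GART entries to tear
 * down for them.
 */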
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}

static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        gart_unmap_page(dev, dma_addr, size, direction, NULL);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}

/* Fallback for dma_map_sg in case of overflow */
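/*
 * Unlike gart_map_sg() this maps each entry separately and performs
 * no merging, so GART space is only consumed for entries the device
 * cannot address directly.
 */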
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        out = 0;
        start = 0;
        start_sg = sgmap = sg;
        seg_size = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}

/* allocate and map a coherent mapping */
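/*
 * The align_mask passed to dma_map_area() below makes the returned
 * bus address aligned to the allocation's order, matching the
 * alignment dma_alloc_coherent() callers may expect of the buffer.
 */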
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag)
{
        dma_addr_t paddr;
        unsigned long align_mask;
        struct page *page;

        if (force_iommu && !(flag & GFP_DMA)) {
                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
                page = alloc_pages(flag | __GFP_ZERO, get_order(size));
                if (!page)
                        return NULL;

                align_mask = (1UL << get_order(size)) - 1;
                paddr = dma_map_area(dev, page_to_phys(page), size,
                                     DMA_BIDIRECTIONAL, align_mask);

                flush_gart();
                if (paddr != bad_dma_address) {
                        *dma_addr = paddr;
                        return page_address(page);
                }
                __free_pages(page, get_order(size));
        } else
                return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

        return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
{
        gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                        iommu_size >> 20);
        }

        return iommu_size;
}

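/*
 * Per the GART register layout used here: the aperture control word
 * carries the size order in bits 3:1 (32MB << order), and the base
 * register holds physical address bits 39:25. Apertures that are
 * unset or would end above 4GB are rejected by returning base 0.
 */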
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}

static void enable_gart_translations(void)
{
        int i;

        for (i = 0; i < num_k8_northbridges; i++) {
                struct pci_dev *dev = k8_northbridges[i];

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
        printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

        if (fix_up_north_bridges) {
                int i;

                printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

                for (i = 0; i < num_k8_northbridges; i++) {
                        struct pci_dev *dev = k8_northbridges[i];

                        /*
                         * Don't enable translations just yet. That is the next
                         * step. Restore the pre-suspend aperture settings.
                         */
                        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
                                               aperture_order << 1);
                        pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
                                               aperture_alloc >> 25);
                }
        }

        enable_gart_translations();

        return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
        return 0;
}

static struct sysdev_class gart_sysdev_class = {
        .name           = "gart",
        .suspend        = gart_suspend,
        .resume         = gart_resume,
};

static struct sys_device device_gart = {
        .id     = 0,
        .cls    = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i, error;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        agp_gatt_table = gatt;

        enable_gart_translations();

        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
        if (error)
                panic("Could not register gart_sysdev -- "
                      "would corrupt data on next suspend");

        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);

        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_WARNING "falling back to iommu=soft.\n");
        return -1;
}

static struct dma_mapping_ops gart_dma_ops = {
        .map_single             = gart_map_single,
        .unmap_single           = gart_unmap_single,
        .map_sg                 = gart_map_sg,
        .unmap_sg               = gart_unmap_sg,
        .map_page               = gart_map_page,
        .unmap_page             = gart_unmap_page,
        .alloc_coherent         = gart_alloc_coherent,
        .free_coherent          = gart_free_coherent,
};

void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}

void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_base, aper_size;
        unsigned long start_pfn, end_pfn;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
                return;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        printk(KERN_WARNING "More than 4GB of memory "
                               "but GART IOMMU not available.\n");
                        printk(KERN_WARNING "falling back to iommu=soft.\n");
                }
                return;
        }

        /* need to map that range */
        aper_size = info.aper_size << 20;
        aper_base = info.aper_base;
        end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = (aper_base>>PAGE_SHIFT);
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                     get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                                  get_order(iommu_pages*sizeof(void *)));
                if (!iommu_leak_tab)
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

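        /*
         * The IOMMU area sits at the top of the aperture and its first
         * EMERGENCY_PAGES stay reserved, so iommu_bus_base itself is
         * never handed out as a mapping and can double as the
         * bad_dma_address error cookie below.
         */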
        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it safe and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 8))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}