/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
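
/*
 * Worked example (illustrative; assumes VTD_PAGE_SHIFT == 12): for the
 * default 48-bit guest address width,
 *
 *	__DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1
 *	DOMAIN_MAX_ADDR(48)  == ((1ULL << 36) - 1) << 12
 *
 * and on a 32-bit kernel DOMAIN_MAX_PFN(48) is clamped to ULONG_MAX,
 * which is why PFNs can safely live in 'unsigned long'.
 */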

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
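
/*
 * Illustrative note: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * two conversions above are identity operations. They only shift on a
 * hypothetical kernel whose MM pages are larger than 4KiB, e.g. with
 * 16KiB MM pages:
 *
 *	mm_to_dma_pfn(1) == 1 << (14 - 12) == 4
 */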

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
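
/*
 * Usage sketch (roughly the sequence device_to_context_entry() uses
 * below; not additional API): a root entry is programmed by pointing it
 * at a freshly allocated context table and only then marking it present:
 *
 *	set_root_value(root, virt_to_phys(context_table));
 *	set_root_present(root);
 *
 * The present bit is set last so the hardware never walks a half-built
 * entry.
 */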

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
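
/*
 * Programming sketch (roughly what domain_context_mapping_one() does
 * later in this file; ordering here is illustrative): a context entry is
 * filled field by field and only then marked present:
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, iommu->agaw);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_translation_type(context, translation);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * Note that the setters only OR bits in; they assume a zeroed entry
 * (see context_clear_entry()).
 */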

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

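/*
 * Why first_pte_in_page() works (illustrative): PTEs are 8 bytes and a
 * page-table page is one VT-d page (4KiB), so each table holds 512
 * entries and always starts on a 4KiB boundary. A pte pointer whose low
 * 12 bits are all zero therefore sits on the first entry of a table;
 * the clear/free loops below use this to detect when they have stepped
 * off the end of the current page-table page.
 */
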
/*
 * This domain is a static identity-mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
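
/*
 * Example (boot-time usage, not code): the parser above accepts a
 * comma-separated option list on the kernel command line, e.g.
 *
 *	intel_iommu=on,strict
 *
 * which enables the IOMMU and disables batched IOTLB flushing.
 */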

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

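/*
 * Note on the PF_MEMALLOC pattern above (and in alloc_pgtable_page()
 * below), spelled out for clarity: setting PF_MEMALLOC lets the atomic
 * allocation dip into the emergency reserves, and the final
 * '&= (~PF_MEMALLOC | flags)' restores the caller's original
 * PF_MEMALLOC bit rather than clearing it unconditionally.
 */
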
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may differ across iommus; start from the default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

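/*
 * Worked example (hypothetical capability values): with
 * DEFAULT_DOMAIN_ADDRESS_WIDTH == 48, width_to_agaw(48) == 2, so the
 * search starts at agaw 2 (4-level tables). If SAGAW only advertises
 * 3-level support (bit 1 set), the loop returns agaw 1 and the domain
 * falls back to a 39-bit address width.
 */
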
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
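
/*
 * Quick reference (follows directly from the helpers above):
 *
 *	agaw	address width	page-table levels
 *	 0	 30 bits	 2
 *	 1	 39 bits	 3
 *	 2	 48 bits	 4
 *	 3	 57 bits	 5
 *
 * i.e. each extra level adds LEVEL_STRIDE (9) bits of address space.
 */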

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
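
/*
 * Worked example: at level 2 each entry spans 2^9 = 512 DMA pages, so
 * for pfn 0x12345:
 *
 *	pfn_level_offset(0x12345, 2) == (0x12345 >> 9) & 0x1ff == 0x91
 *	align_to_level(0x12345, 2)   == 0x12400
 *
 * (0x12400 is the next 512-page boundary at or above 0x12345).
 */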

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

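/*
 * Sketch of the walk above (descriptive, not additional code): for a
 * 4-level domain (agaw 2), pfn_to_dma_pte() indexes the level 4..2
 * tables to reach the level-1 table, allocating intermediate tables on
 * demand. The cmpxchg64() makes concurrent walkers safe without a lock:
 * whichever CPU installs the new table first wins, and the loser frees
 * its copy and continues through the winner's table.
 */
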
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably just to be super safe. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

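/*
 * Encoding note (worked example, assuming 4KiB VT-d pages): for a PSI
 * flush, 'addr' must be aligned to the flushed size and 'size_order'
 * selects 2^order pages, so flushing 16 pages at pfn 0x1000 means
 *
 *	addr       = (u64)0x1000 << VTD_PAGE_SHIFT
 *	size_order = 4
 *
 * and 'val_iva = size_order | addr' above packs both into the one IVA
 * register write, since the aligned address leaves the low bits free.
 */
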
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support
	 * or the size is too big.
	 * PSI requires the page count to be 2 ^ x, with the base address
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

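/*
 * Worked example for the mask computation above: flushing 5 pages gives
 *
 *	__roundup_pow_of_two(5) == 8, so mask == ilog2(8) == 3
 *
 * i.e. an 8-page naturally aligned region is invalidated; over-flushing
 * a little is the price of PSI's power-of-two granularity.
 */
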
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
		for (; i < cap_ndoms(iommu->cap); ) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);

			i = find_next_bit(iommu->domain_ids,
					  cap_ndoms(iommu->cap), i+1);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
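
/*
 * Worked arithmetic (follows from the formula above): the adjusted width
 * is gaw rounded up so that (gaw - 12) is a multiple of 9, matching the
 * 4KiB-page, 9-bits-per-level table layout:
 *
 *	guestwidth_to_adjustwidth(39) == 39	((39-12) % 9 == 0)
 *	guestwidth_to_adjustwidth(40) == 48	(rounded up to the next step)
 */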

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

1426static void domain_exit(struct dmar_domain *domain)
1427{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001428 struct dmar_drhd_unit *drhd;
1429 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001430
1431 /* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of the page tables for an iommu
		 * whose agaw is smaller than the domain's.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
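/*
 * Worked example (editor's illustration): with 4KiB pages on both sides
 * (PAGE_SHIFT == VTD_PAGE_SHIFT == 12), host_addr == 0x1003 and size ==
 * 0x2000 give PAGE_ALIGN(0x003 + 0x2000) == 0x3000, i.e. 3 VT-d pages --
 * the partially-used first and last pages are both counted.
 */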

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}
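	/*
	 * Editor's note on the non-sg path above: sg_res is primed to
	 * nr_pages + 1 so that the "sg_res--; if (!sg_res) sg = sg_next(sg);"
	 * logic at the bottom of the loop below can never reach zero and
	 * dereference the NULL sg pointer.
	 */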

	while (nr_pages--) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte++;
		if (!nr_pages || first_pte_in_page(pte)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
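/*
 * Usage note (editor's): iommu_domain_identity_map() below establishes a
 * 1:1 range with domain_pfn_mapping(domain, first_vpfn, first_vpfn,
 * last_vpfn - first_vpfn + 1, DMA_PTE_READ|DMA_PTE_WRITE), while the
 * scatterlist DMA path uses domain_sg_mapping(); both funnel into
 * __domain_mapping() above.
 */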

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores this info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody raced us and registered the device first */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up
	   in si_domain to start with */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
	struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if (iommu_identity_mapping == 2)
		return IS_GFX_DEVICE(pdev);

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pdev->is_pcie) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup)
		return pdev->dma_mask > DMA_BIT_MASK(32);

	return 1;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
			       hw ? "hardware" : "software", pci_name(pdev));

			ret = domain_add_dev_info(si_domain, pdev,
						  hw ? CONTEXT_TT_PASS_THROUGH :
						       CONTEXT_TT_MULTI_LEVEL);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the
		 * single-threaded kernel __init code path; all other
		 * accesses are read-only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping = 1;
#ifdef CONFIG_DMAR_BROKEN_GFX_WA
	else
		iommu_identity_mapping = 2;
#endif
	/*
	 * If pass-through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx and isa, possibly falling back
	 * to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A 32-bit DMA device is removed from si_domain
			 * and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * If a 64-bit DMA device is detached from a VM, it is put
		 * back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * [paddr, paddr + size) might span partial pages; we should map
	 * whole pages. Note: if two parts of one page are separately
	 * mapped, we might have two guest addresses mapping to the same
	 * host paddr, but this is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
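			/*
			 * Worked example (editor's note): an iova spanning 4
			 * MM pages with PAGE_SHIFT == VTD_PAGE_SHIFT == 12
			 * gives mask == ilog2(4) == 2, i.e. the device-IOTLB
			 * invalidation covers 2^2 pages starting at pfn_lo.
			 */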
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					      iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save roughly 1/6th
		 * of the cpu used up by the iotlb flush operation...
		 */
	}
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
2819 /*
2820 * queue up the release of the unmap to save the 1/6th of the
2821 * cpu used up by the iotlb flush operation...
2822 */
2823 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002824}
2825
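/*
 * Passthrough scatterlist "mapping" for devices that bypass the IOMMU:
 * DMA addresses are simply the physical addresses of the pages.
 */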
static int intel_nontranslate_map_sg(struct device *hwdev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

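/*
 * Map a scatterlist: one IOVA allocation sized to cover every element
 * (each rounded up to page granularity), then a single call to
 * domain_sg_mapping() to populate the page tables.  On failure the
 * partially written PTEs, the page tables and the IOVA are all rolled
 * back.
 */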
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

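/*
 * These ops are installed as the global dma_ops in intel_iommu_init(),
 * so ordinary DMA API calls end up here.  A minimal sketch of a
 * consumer (hypothetical driver code, not part of this file):
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -EIO;
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */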
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

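/*
 * Slab caches for the three objects this driver allocates most often:
 * domains, device_domain_info structures and iova nodes.
 */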
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

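/*
 * Mark DRHD units that can be ignored: those whose device scope
 * matches no PCI devices at all and, unless dmar_map_gfx is set, those
 * that cover nothing but graphics devices (whose devices then get the
 * dummy "no translation" archdata instead).
 */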
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

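/*
 * Suspend/resume support: translation is disabled and the fault event
 * registers (FECTL/FEDATA/FEADDR/FEUADDR) are saved across the power
 * transition, then restored after init_iommu_hw() has re-enabled
 * queued invalidation and translation.
 */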
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

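/*
 * Main DMA-remapping initialization, reached from the arch PCI/DMA
 * setup code: parse the DMAR table, initialize the hardware units,
 * install intel_dma_ops as the global dma_ops and register the VT-d
 * implementation of the generic IOMMU API.
 */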
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

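/*
 * If the device sits behind a PCIe-to-PCI bridge, the bridge and any
 * intermediate upstream bridges were context-mapped along with it, so
 * they have to be torn down here as well.
 */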
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

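/*
 * Remove a single device from a domain.  If it was the last device on
 * its IOMMU in this domain, that IOMMU is also cleared from the
 * domain's bitmap and the cached capabilities are recomputed.
 */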
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

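/*
 * Detach every device from a (virtual machine) domain.  The
 * device_domain_lock is dropped around each per-device teardown and
 * re-taken before the list is examined again.
 */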
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine; it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

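/*
 * Second-stage init for domains created via the generic IOMMU API:
 * set up the iova allocator, reserve the special ranges, derive the
 * AGAW from the requested guest width and allocate the top-level page
 * directory.
 */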
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

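/*
 * Generic IOMMU API callbacks (KVM device assignment etc.).  These are
 * wired up through the intel_iommu_ops table registered at the bottom
 * of this file.
 */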
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (!size)
		return;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

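/*
 * A minimal sketch of how a client drives these ops through the
 * generic IOMMU API once register_iommu(&intel_iommu_ops) has run
 * (hypothetical caller, not part of this file):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	if (!dom || iommu_attach_device(dom, &pdev->dev))
 *		goto fail;
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */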
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);