/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/tboot.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)	((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)	((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))


/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

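/*
 * For example: VTD_PAGE_SHIFT is always 12 (4KiB), so on x86 with 4KiB
 * MM pages these conversions are identity operations, while a kernel
 * built with 16KiB pages maps each mm_pfn to four consecutive dma_pfns.
 */
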
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}
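
/*
 * Translation type values programmed by context_set_translation_type()
 * (the CONTEXT_TT_* constants used further down): 0 = translate through
 * the page tables, 1 = additionally allow device-IOTLB/translated
 * requests, 2 = pass-through with no translation.
 */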

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This is a static identity mapping domain.
 *	1. It creates a static 1:1 mapping of all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered intel_iommus, used to size and index g_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

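/*
 * Example: booting with "intel_iommu=on,strict" both enables the IOMMU
 * and disables batched IOTLB flushing; options are comma-separated and
 * consumed one at a time by the loop above.
 */
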
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* Set PF_MEMALLOC to dip into the emergency reserves and avoid
	   low-memory failures, restoring the caller's flag afterwards */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* As above: borrow PF_MEMALLOC to avoid low-memory failures */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate the max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate the agaw for each iommu.
 * "SAGAW" may differ across iommus: start from a default agaw and fall
 * back to the next smaller agaw that the iommu actually supports.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

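/*
 * Map a (segment, bus, devfn) triple back to the DMAR unit covering it:
 * either an explicitly listed device, a device behind a listed bridge
 * (the subordinate bus-range check below), or an INCLUDE_ALL unit.
 */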
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Get the context entry for a given bus and devfn, allocating the
   per-bus context table on first use */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
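
/*
 * Worked example: agaw 0 is the base 2-level table covering 30 bits of
 * address (1GiB); the default 48-bit width gives agaw (48 - 30) / 9 = 2,
 * i.e. a 4-level table. Each level decodes LEVEL_STRIDE (9) more bits.
 */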

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

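/*
 * Walk (and, where necessary, build) the page table down to the 4KiB
 * leaf entry for @pfn. The cmpxchg64() below lets concurrent callers
 * race safely without a lock: the loser frees its freshly allocated
 * table page and reuses the winner's.
 */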
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* Return the page-table entry for @pfn at the given level, or NULL */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* Clear the last-level (leaf) ptes; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}

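/*
 * Note the first_pte/pte pairing above: runs of leaf entries that share
 * one page-table page are cleared in a batch and flushed with a single
 * domain_flush_cache() call rather than one flush per entry.
 */
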
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/*
 * Invalidate entries in the context-cache; type selects global,
 * domain-selective or device-selective granularity. The wait loop
 * makes the invalidation synchronous.
 */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/*
 * Invalidate IOTLB entries; type selects global, domain-selective or
 * page-selective (PSI) granularity.
 */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably only meant to be extra-safe. Looks like we
	 * can ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

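/*
 * For the PSI case above, size_order is log2 of the number of 4KiB
 * pages being invalidated and, per the VT-d spec, addr must be aligned
 * to that size: e.g. size_order 2 names a 16KiB-aligned 4-page region.
 */
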
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to a domain-selective flush if there is no PSI
	 * support or the size is too big.
	 * PSI requires the page count to be a power of two, and the base
	 * address to be naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

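/*
 * Example of the mask computation above: flushing 9 pages rounds up to
 * 16, so mask = ilog2(16) = 4 and the hardware invalidates a naturally
 * aligned 16-page region containing the requested range.
 */
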
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0, so it must be pre-allocated here.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

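/*
 * Example: a guest width of 36 bits gives r = (36 - 12) % 9 = 6 and is
 * rounded up to 39 bits, the next width that maps onto whole 9-bit
 * page-table levels; 48 bits already fits (r == 0) and is kept as-is.
 */
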
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware.  And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping.  If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer.  If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

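/*
 * Map the device itself, then every bridge on the path up to the topmost
 * PCIe-to-PCI bridge (and that bridge's secondary bus), so that DMA arriving
 * at the IOMMU with a bridge's source-id is routed to the same domain.
 */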
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

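/*
 * Worked example for aligned_nrpages() (illustrative only, assuming 4KiB MM
 * and VT-d pages): host_addr 0x1234, size 0x3000 leaves offset 0x234 within
 * the page; PAGE_ALIGN(0x234 + 0x3000) rounds up to 0x4000, i.e. 4 pages.
 *
 * __domain_mapping() below installs PTEs for nr_pages starting at iov_pfn,
 * taking the physical pages either from a scatterlist or from a contiguous
 * range starting at phys_pfn.  Each PTE is written with a local cmpxchg so
 * an already-present entry is detected and reported rather than silently
 * overwritten, and the CPU cache is flushed one page of PTEs at a time.
 */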
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need a lock here; nobody else
		 * touches this iova range.
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte++;
		if (!nr_pages || first_pte_in_page(pte)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

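/*
 * Detach every device from this domain: unlink each device_domain_info from
 * both lists, clear the archdata pointer, disable the device IOTLB, and tear
 * down the context entry on the owning IOMMU.  The lock is dropped around
 * the per-device teardown and reacquired for the next list walk.
 */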
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

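/*
 * Find or create the domain for a device.  Devices behind a PCIe-to-PCI
 * bridge must share one domain, keyed by the bridge's secondary bus (or the
 * bridge itself for legacy PCI), because they share a source-id.  Racing
 * callers are resolved under device_domain_lock: the loser frees its freshly
 * built domain and adopts the winner's.
 */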
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* the PCIe-PCI bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* the pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else raced with us and set it up first */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

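/*
 * Build a 1:1 mapping for the given range in a domain: reserve the IOVA
 * range so the allocator never hands it out, clear any stale PTEs (an RMRR
 * may overlap real memory), then map each pfn onto itself.
 */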
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * An RMRR range might overlap the physical memory range,
	 * so clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through. */
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}

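/*
 * si_domain is the single static-identity (1:1) domain shared by all devices
 * that are allowed to address all of memory.  si_domain_init() attaches it
 * to every active IOMMU and then identity-maps the active memory regions of
 * every online node via work_with_active_regions().
 */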
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if (iommu_identity_mapping == 2)
		return IS_GFX_DEVICE(pdev);

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pdev->is_pcie) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup)
		return pdev->dma_mask > DMA_BIT_MASK(32);

	return 1;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
			       pci_name(pdev));

			ret = domain_context_mapping(si_domain, pdev,
						     CONTEXT_TT_MULTI_LEVEL);
			if (ret)
				return ret;
			ret = domain_add_dev_info(si_domain, pdev);
			if (ret)
				return ret;
		}
	}

	return 0;
}

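/*
 * Boot-time bring-up: allocate the global iommu and deferred-flush tables,
 * run iommu_init_domains() and iommu_alloc_root_entry() on each unit, choose
 * queued vs. register-based invalidation, set up pass-through or
 * identity/RMRR/ISA mappings as configured, then enable fault reporting and
 * translation on every unit.
 */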
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * If pass through cannot be enabled, the iommu falls back to
	 * identity mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * no lock needed: this is only incremented in the
		 * single-threaded kernel __init code path; all other
		 * accesses are read-only.
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs.  Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from a sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If queued invalidation was already initialized by us
		 * (for example, while enabling interrupt-remapping), then
		 * things are already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled; use Register Based
			 * Invalidate.
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized with the pass-through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx and isa, possibly falling back
	 * to static identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
#ifdef CONFIG_DMAR_BROKEN_GFX_WA
		if (!iommu_identity_mapping)
			iommu_identity_mapping = 2;
#endif
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc.
		 *   endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOSes list non-existent devices
				 * in the DMAR table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				"IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32), and if that fails then try allocating
		 * from the higher range.
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed\n",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check whether the pdev needs to go through the non-identity map and
 * unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A 32-bit device is removed from si_domain and
			 * falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * A 64-bit DMA device that was detached from a VM is put
		 * back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev);
			if (ret)
				return 0;
			ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

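/*
 * Core single-mapping path: find (or lazily build) the device's domain,
 * allocate an IOVA below the device's DMA mask, map the whole pages covering
 * [paddr, paddr + size), and flush the IOTLB only when caching mode requires
 * it.  Returns the bus address, or 0 on failure.
 */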
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * [paddr, paddr + size) might span a partial page, so map the whole
	 * page.  Note: if two parts of one page are mapped separately, we
	 * might get two guest addresses mapping to the same host paddr, but
	 * this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

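/*
 * Deferred-unmap machinery: instead of flushing the IOTLB on every unmap,
 * freed IOVAs are batched per IOMMU in deferred_flush[] and released by a
 * single global flush, triggered either by a 10ms timer or when list_size
 * hits HIGH_WATER_MARK.  The trade-off is a short window in which stale
 * translations remain valid.
 */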
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

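/*
 * Unmap path: look up the IOVA that covers dev_addr, clear and free the page
 * tables beneath it, then either flush the IOTLB and free the IOVA
 * immediately (intel_iommu_strict) or queue it on the deferred-flush lists
 * above.
 */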
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap; this saves roughly the
		 * 1/6th of the CPU time otherwise used up by the iotlb
		 * flush operation.
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}

FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002779static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2780 int nelems, enum dma_data_direction dir,
2781 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002782{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002783 struct pci_dev *pdev = to_pci_dev(hwdev);
2784 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002785 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002786 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002787 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002788
David Woodhouse73676832009-07-04 14:08:36 +01002789 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002790 return;
2791
2792 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002793 BUG_ON(!domain);
2794
2795 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002796
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002797 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002798 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2799 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002800 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002801
David Woodhoused794dc92009-06-28 00:27:49 +01002802 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2803 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002804
	/* clear the PTEs for the whole range */
David Woodhoused794dc92009-06-28 00:27:49 +01002806 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002807
David Woodhoused794dc92009-06-28 00:27:49 +01002808 /* free page tables */
2809 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2810
David Woodhouse03d6a242009-06-28 15:33:46 +01002811 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhoused794dc92009-06-28 00:27:49 +01002812 (last_pfn - start_pfn + 1));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002813
2814 /* free iova */
2815 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002816}
2817
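/*
 * With no translation in effect, DMA addresses are just bus/physical
 * addresses: fill in each segment's dma_address directly from its page.
 */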
static int intel_nontranslate_map_sg(struct device *hwdev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002819 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002820{
2821 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002822 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002823
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002824 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002825 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002826 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002827 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002828 }
2829 return nelems;
2830}
2831
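/*
 * Map a scatterlist by carving out one IOVA range big enough for every
 * segment (each rounded up to page granularity), then building the
 * page-table entries for the whole list in a single pass.
 */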
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002832static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2833 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002834{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002835 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002836 struct pci_dev *pdev = to_pci_dev(hwdev);
2837 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002838 size_t size = 0;
2839 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002841 struct iova *iova = NULL;
2842 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002843 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01002844 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08002845 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002846
2847 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01002848 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002849 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002850
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002851 domain = get_valid_domain_for_dev(pdev);
2852 if (!domain)
2853 return 0;
2854
Weidong Han8c11e792008-12-08 15:29:22 +08002855 iommu = domain_get_iommu(domain);
2856
David Woodhouseb536d242009-06-28 14:49:31 +01002857 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01002858 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002859
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002860 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2861 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002862 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002863 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002864 return 0;
2865 }
2866
2867 /*
	 * Check if DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08002872 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002873 prot |= DMA_PTE_READ;
2874 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2875 prot |= DMA_PTE_WRITE;
2876
David Woodhouseb536d242009-06-28 14:49:31 +01002877 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01002878
Fenghua Yuf5329592009-08-04 15:09:37 -07002879 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01002880 if (unlikely(ret)) {
		/* clear the PTEs */
2882 dma_pte_clear_range(domain, start_vpfn,
2883 start_vpfn + size - 1);
2884 /* free page tables */
2885 dma_pte_free_pagetable(domain, start_vpfn,
2886 start_vpfn + size - 1);
2887 /* free iova */
2888 __free_iova(&domain->iovad, iova);
2889 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002890 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002891
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002892 /* it's a non-present to present mapping. Only flush if caching mode */
2893 if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002895 else
Weidong Han8c11e792008-12-08 15:29:22 +08002896 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002897
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002898 return nelems;
2899}
2900
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002901static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2902{
2903 return !dma_addr;
2904}
2905
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002906struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002907 .alloc_coherent = intel_alloc_coherent,
2908 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002909 .map_sg = intel_map_sg,
2910 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002911 .map_page = intel_map_page,
2912 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002913 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002914};
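
/*
 * These ops are not called directly; drivers reach them through the
 * generic DMA API once dma_ops points at intel_dma_ops.  A sketch, for
 * some hypothetical driver buffer "buf" on device pdev:
 *
 *	dma_addr_t addr = dma_map_single(&pdev->dev, buf, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, addr))
 *		return -EIO;
 *	...
 *	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
 */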
2915
2916static inline int iommu_domain_cache_init(void)
2917{
2918 int ret = 0;
2919
2920 iommu_domain_cache = kmem_cache_create("iommu_domain",
2921 sizeof(struct dmar_domain),
2922 0,
2923 SLAB_HWCACHE_ALIGN,
					NULL);
2926 if (!iommu_domain_cache) {
2927 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2928 ret = -ENOMEM;
2929 }
2930
2931 return ret;
2932}
2933
2934static inline int iommu_devinfo_cache_init(void)
2935{
2936 int ret = 0;
2937
2938 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2939 sizeof(struct device_domain_info),
2940 0,
2941 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002942 NULL);
2943 if (!iommu_devinfo_cache) {
2944 printk(KERN_ERR "Couldn't create devinfo cache\n");
2945 ret = -ENOMEM;
2946 }
2947
2948 return ret;
2949}
2950
2951static inline int iommu_iova_cache_init(void)
2952{
2953 int ret = 0;
2954
2955 iommu_iova_cache = kmem_cache_create("iommu_iova",
2956 sizeof(struct iova),
2957 0,
2958 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959 NULL);
2960 if (!iommu_iova_cache) {
2961 printk(KERN_ERR "Couldn't create iova cache\n");
2962 ret = -ENOMEM;
2963 }
2964
2965 return ret;
2966}
2967
2968static int __init iommu_init_mempool(void)
2969{
2970 int ret;
2971 ret = iommu_iova_cache_init();
2972 if (ret)
2973 return ret;
2974
2975 ret = iommu_domain_cache_init();
2976 if (ret)
2977 goto domain_error;
2978
2979 ret = iommu_devinfo_cache_init();
2980 if (!ret)
2981 return ret;
2982
2983 kmem_cache_destroy(iommu_domain_cache);
2984domain_error:
2985 kmem_cache_destroy(iommu_iova_cache);
2986
2987 return -ENOMEM;
2988}
2989
2990static void __init iommu_exit_mempool(void)
2991{
2992 kmem_cache_destroy(iommu_devinfo_cache);
2993 kmem_cache_destroy(iommu_domain_cache);
2994 kmem_cache_destroy(iommu_iova_cache);
2995
2996}
2997
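/*
 * Mark DRHD units that control no PCI devices as ignored and, unless
 * dmar_map_gfx is set, also bypass units that cover only graphics
 * devices by giving those devices the dummy identity domain info.
 */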
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002998static void __init init_no_remapping_devices(void)
2999{
3000 struct dmar_drhd_unit *drhd;
3001
3002 for_each_drhd_unit(drhd) {
3003 if (!drhd->include_all) {
3004 int i;
3005 for (i = 0; i < drhd->devices_cnt; i++)
3006 if (drhd->devices[i] != NULL)
3007 break;
3008 /* ignore DMAR unit if no pci devices exist */
3009 if (i == drhd->devices_cnt)
3010 drhd->ignored = 1;
3011 }
3012 }
3013
3014 if (dmar_map_gfx)
3015 return;
3016
3017 for_each_drhd_unit(drhd) {
3018 int i;
3019 if (drhd->ignored || drhd->include_all)
3020 continue;
3021
3022 for (i = 0; i < drhd->devices_cnt; i++)
3023 if (drhd->devices[i] &&
3024 !IS_GFX_DEVICE(drhd->devices[i]))
3025 break;
3026
3027 if (i < drhd->devices_cnt)
3028 continue;
3029
3030 /* bypass IOMMU if it is just for gfx devices */
3031 drhd->ignored = 1;
3032 for (i = 0; i < drhd->devices_cnt; i++) {
3033 if (!drhd->devices[i])
3034 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07003035 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003036 }
3037 }
3038}
3039
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003040#ifdef CONFIG_SUSPEND
3041static int init_iommu_hw(void)
3042{
3043 struct dmar_drhd_unit *drhd;
3044 struct intel_iommu *iommu = NULL;
3045
3046 for_each_active_iommu(iommu, drhd)
3047 if (iommu->qi)
3048 dmar_reenable_qi(iommu);
3049
3050 for_each_active_iommu(iommu, drhd) {
3051 iommu_flush_write_buffer(iommu);
3052
3053 iommu_set_root_entry(iommu);
3054
3055 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003056 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003057 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003058 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003059 iommu_disable_protect_mem_regions(iommu);
3060 iommu_enable_translation(iommu);
3061 }
3062
3063 return 0;
3064}
3065
3066static void iommu_flush_all(void)
3067{
3068 struct dmar_drhd_unit *drhd;
3069 struct intel_iommu *iommu;
3070
3071 for_each_active_iommu(iommu, drhd) {
3072 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003073 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003074 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003075 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003076 }
3077}
3078
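/*
 * On suspend: flush all caches, disable translation and stash the
 * fault-event control/data/address registers so that iommu_resume()
 * can restore them after init_iommu_hw() re-enables each unit.
 */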
3079static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3080{
3081 struct dmar_drhd_unit *drhd;
3082 struct intel_iommu *iommu = NULL;
3083 unsigned long flag;
3084
3085 for_each_active_iommu(iommu, drhd) {
3086 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3087 GFP_ATOMIC);
3088 if (!iommu->iommu_state)
3089 goto nomem;
3090 }
3091
3092 iommu_flush_all();
3093
3094 for_each_active_iommu(iommu, drhd) {
3095 iommu_disable_translation(iommu);
3096
3097 spin_lock_irqsave(&iommu->register_lock, flag);
3098
3099 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3100 readl(iommu->reg + DMAR_FECTL_REG);
3101 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3102 readl(iommu->reg + DMAR_FEDATA_REG);
3103 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3104 readl(iommu->reg + DMAR_FEADDR_REG);
3105 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3106 readl(iommu->reg + DMAR_FEUADDR_REG);
3107
3108 spin_unlock_irqrestore(&iommu->register_lock, flag);
3109 }
3110 return 0;
3111
3112nomem:
3113 for_each_active_iommu(iommu, drhd)
3114 kfree(iommu->iommu_state);
3115
3116 return -ENOMEM;
3117}
3118
3119static int iommu_resume(struct sys_device *dev)
3120{
3121 struct dmar_drhd_unit *drhd;
3122 struct intel_iommu *iommu = NULL;
3123 unsigned long flag;
3124
3125 if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3127 return -EIO;
3128 }
3129
3130 for_each_active_iommu(iommu, drhd) {
3131
3132 spin_lock_irqsave(&iommu->register_lock, flag);
3133
3134 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3135 iommu->reg + DMAR_FECTL_REG);
3136 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3137 iommu->reg + DMAR_FEDATA_REG);
3138 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3139 iommu->reg + DMAR_FEADDR_REG);
3140 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3141 iommu->reg + DMAR_FEUADDR_REG);
3142
3143 spin_unlock_irqrestore(&iommu->register_lock, flag);
3144 }
3145
3146 for_each_active_iommu(iommu, drhd)
3147 kfree(iommu->iommu_state);
3148
3149 return 0;
3150}
3151
3152static struct sysdev_class iommu_sysclass = {
3153 .name = "iommu",
3154 .resume = iommu_resume,
3155 .suspend = iommu_suspend,
3156};
3157
3158static struct sys_device device_iommu = {
3159 .cls = &iommu_sysclass,
3160};
3161
3162static int __init init_iommu_sysfs(void)
3163{
3164 int error;
3165
3166 error = sysdev_class_register(&iommu_sysclass);
3167 if (error)
3168 return error;
3169
3170 error = sysdev_register(&device_iommu);
3171 if (error)
3172 sysdev_class_unregister(&iommu_sysclass);
3173
3174 return error;
3175}
3176
3177#else
3178static int __init init_iommu_sysfs(void)
3179{
3180 return 0;
3181}
#endif /* CONFIG_SUSPEND */
3183
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003184int __init intel_iommu_init(void)
3185{
3186 int ret = 0;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003187 int force_on = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003189 /* VT-d is required for a TXT/tboot launch, so enforce that */
3190 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003191
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003192 if (dmar_table_init()) {
3193 if (force_on)
3194 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003195 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003196 }
3197
3198 if (dmar_dev_scope_init()) {
3199 if (force_on)
3200 panic("tboot: Failed to initialize DMAR device scope\n");
3201 return -ENODEV;
3202 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003203
Suresh Siddha2ae21012008-07-10 11:16:43 -07003204 /*
3205 * Check the need for DMA-remapping initialization now.
	 * The above initialization is also used by interrupt remapping.
3207 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003208 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003209 return -ENODEV;
3210
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003211 iommu_init_mempool();
3212 dmar_init_reserved_ranges();
3213
3214 init_no_remapping_devices();
3215
3216 ret = init_dmars();
3217 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003218 if (force_on)
3219 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003220 printk(KERN_ERR "IOMMU: dmar init failed\n");
3221 put_iova_domain(&reserved_iova_list);
3222 iommu_exit_mempool();
3223 return ret;
3224 }
3225 printk(KERN_INFO
3226 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3227
mark gross5e0d2a62008-03-04 15:22:08 -08003228 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003229 force_iommu = 1;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003230
3231 if (!iommu_pass_through) {
3232 printk(KERN_INFO
3233 "Multi-level page-table translation for DMAR.\n");
3234 dma_ops = &intel_dma_ops;
3235 } else
3236 printk(KERN_INFO
3237 "DMAR: Pass through translation for DMAR.\n");
3238
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003239 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003240
3241 register_iommu(&intel_iommu_ops);
3242
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003243 return 0;
3244}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003245
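/*
 * When a device sits behind PCIe-to-PCI bridges, its transactions may
 * carry a bridge's source-id, so the context entries programmed for
 * every bridge on the upstream path have to be torn down as well.
 */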
Han, Weidong3199aa62009-02-26 17:31:12 +08003246static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3247 struct pci_dev *pdev)
3248{
3249 struct pci_dev *tmp, *parent;
3250
3251 if (!iommu || !pdev)
3252 return;
3253
3254 /* dependent device detach */
3255 tmp = pci_find_upstream_pcie_bridge(pdev);
3256 /* Secondary interface's bus number and devfn 0 */
3257 if (tmp) {
3258 parent = pdev->bus->self;
3259 while (parent != tmp) {
3260 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01003261 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003262 parent = parent->bus->self;
3263 }
3264 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3265 iommu_detach_dev(iommu,
3266 tmp->subordinate->number, 0);
3267 else /* this is a legacy PCI bridge */
David Woodhouse276dbf92009-04-04 01:45:37 +01003268 iommu_detach_dev(iommu, tmp->bus->number,
3269 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003270 }
3271}
3272
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003273static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003274 struct pci_dev *pdev)
3275{
3276 struct device_domain_info *info;
3277 struct intel_iommu *iommu;
3278 unsigned long flags;
3279 int found = 0;
3280 struct list_head *entry, *tmp;
3281
David Woodhouse276dbf92009-04-04 01:45:37 +01003282 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3283 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003284 if (!iommu)
3285 return;
3286
3287 spin_lock_irqsave(&device_domain_lock, flags);
3288 list_for_each_safe(entry, tmp, &domain->devices) {
3289 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf92009-04-04 01:45:37 +01003290 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08003291 if (info->bus == pdev->bus->number &&
3292 info->devfn == pdev->devfn) {
3293 list_del(&info->link);
3294 list_del(&info->global);
3295 if (info->dev)
3296 info->dev->dev.archdata.iommu = NULL;
3297 spin_unlock_irqrestore(&device_domain_lock, flags);
3298
Yu Zhao93a23a72009-05-18 13:51:37 +08003299 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003300 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003301 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003302 free_devinfo_mem(info);
3303
3304 spin_lock_irqsave(&device_domain_lock, flags);
3305
3306 if (found)
3307 break;
3308 else
3309 continue;
3310 }
3311
		/* If there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * and update the iommu count and capabilities.
		 */
David Woodhouse276dbf92009-04-04 01:45:37 +01003316 if (iommu == device_to_iommu(info->segment, info->bus,
3317 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003318 found = 1;
3319 }
3320
3321 if (found == 0) {
3322 unsigned long tmp_flags;
3323 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3324 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3325 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003326 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003327 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3328 }
3329
3330 spin_unlock_irqrestore(&device_domain_lock, flags);
3331}
3332
3333static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3334{
3335 struct device_domain_info *info;
3336 struct intel_iommu *iommu;
3337 unsigned long flags1, flags2;
3338
3339 spin_lock_irqsave(&device_domain_lock, flags1);
3340 while (!list_empty(&domain->devices)) {
3341 info = list_entry(domain->devices.next,
3342 struct device_domain_info, link);
3343 list_del(&info->link);
3344 list_del(&info->global);
3345 if (info->dev)
3346 info->dev->dev.archdata.iommu = NULL;
3347
3348 spin_unlock_irqrestore(&device_domain_lock, flags1);
3349
Yu Zhao93a23a72009-05-18 13:51:37 +08003350 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01003351 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003352 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003353 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003354
3355 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003356 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003357 */
3358 spin_lock_irqsave(&domain->iommu_lock, flags2);
3359 if (test_and_clear_bit(iommu->seq_id,
3360 &domain->iommu_bmp)) {
3361 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003362 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003363 }
3364 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3365
3366 free_devinfo_mem(info);
3367 spin_lock_irqsave(&device_domain_lock, flags1);
3368 }
3369 spin_unlock_irqrestore(&device_domain_lock, flags1);
3370}
3371
Weidong Han5e98c4b2008-12-08 23:03:27 +08003372/* domain id for virtual machine, it won't be set in context */
3373static unsigned long vm_domid;
3374
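/* Return the smallest AGAW of all IOMMUs this VM domain spans. */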
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003375static int vm_domain_min_agaw(struct dmar_domain *domain)
3376{
3377 int i;
3378 int min_agaw = domain->agaw;
3379
3380 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3381 for (; i < g_num_of_iommus; ) {
3382 if (min_agaw > g_iommus[i]->agaw)
3383 min_agaw = g_iommus[i]->agaw;
3384
3385 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3386 }
3387
3388 return min_agaw;
3389}
3390
Weidong Han5e98c4b2008-12-08 23:03:27 +08003391static struct dmar_domain *iommu_alloc_vm_domain(void)
3392{
3393 struct dmar_domain *domain;
3394
3395 domain = alloc_domain_mem();
3396 if (!domain)
3397 return NULL;
3398
3399 domain->id = vm_domid++;
3400 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3401 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3402
3403 return domain;
3404}
3405
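/*
 * Initialize a domain created through the IOMMU API: reserve the
 * special IOVA ranges, size the address width from guest_width and
 * allocate the top-level page directory.
 */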
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003406static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003407{
3408 int adjust_width;
3409
3410 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003411 spin_lock_init(&domain->iommu_lock);
3412
3413 domain_reserve_special_ranges(domain);
3414
3415 /* calculate AGAW */
3416 domain->gaw = guest_width;
3417 adjust_width = guestwidth_to_adjustwidth(guest_width);
3418 domain->agaw = width_to_agaw(adjust_width);
3419
3420 INIT_LIST_HEAD(&domain->devices);
3421
3422 domain->iommu_count = 0;
3423 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003424 domain->iommu_snooping = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003425 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003426
3427 /* always allocate the top pgd */
3428 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3429 if (!domain->pgd)
3430 return -ENOMEM;
3431 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3432 return 0;
3433}
3434
3435static void iommu_free_vm_domain(struct dmar_domain *domain)
3436{
3437 unsigned long flags;
3438 struct dmar_drhd_unit *drhd;
3439 struct intel_iommu *iommu;
3440 unsigned long i;
3441 unsigned long ndomains;
3442
3443 for_each_drhd_unit(drhd) {
3444 if (drhd->ignored)
3445 continue;
3446 iommu = drhd->iommu;
3447
3448 ndomains = cap_ndoms(iommu->cap);
3449 i = find_first_bit(iommu->domain_ids, ndomains);
3450 for (; i < ndomains; ) {
3451 if (iommu->domains[i] == domain) {
3452 spin_lock_irqsave(&iommu->lock, flags);
3453 clear_bit(i, iommu->domain_ids);
3454 iommu->domains[i] = NULL;
3455 spin_unlock_irqrestore(&iommu->lock, flags);
3456 break;
3457 }
3458 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3459 }
3460 }
3461}
3462
3463static void vm_domain_exit(struct dmar_domain *domain)
3464{
	/* Domain 0 is reserved, so don't process it */
3466 if (!domain)
3467 return;
3468
3469 vm_domain_remove_all_dev_info(domain);
3470 /* destroy iovas */
3471 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003472
3473 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003474 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003475
3476 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003477 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003478
3479 iommu_free_vm_domain(domain);
3480 free_domain_mem(domain);
3481}
3482
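/*
 * Callbacks backing the generic IOMMU API (struct iommu_ops below).
 * Consumers such as KVM device assignment reach them through the
 * iommu_* wrappers; a sketch, assuming a struct device *dev already
 * discovered:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	if (dom && !iommu_attach_device(dom, dev))
 *		iommu_map_range(dom, iova, hpa, size, IOMMU_READ);
 */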
Joerg Roedel5d450802008-12-03 14:52:32 +01003483static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003484{
Joerg Roedel5d450802008-12-03 14:52:32 +01003485 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003486
Joerg Roedel5d450802008-12-03 14:52:32 +01003487 dmar_domain = iommu_alloc_vm_domain();
3488 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003489 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003490 "intel_iommu_domain_init: dmar_domain == NULL\n");
3491 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003492 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003493 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003494 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003495 "intel_iommu_domain_init() failed\n");
3496 vm_domain_exit(dmar_domain);
3497 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003498 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003499 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003500
Joerg Roedel5d450802008-12-03 14:52:32 +01003501 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003502}
Kay, Allen M38717942008-09-09 18:37:29 +03003503
Joerg Roedel5d450802008-12-03 14:52:32 +01003504static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003505{
Joerg Roedel5d450802008-12-03 14:52:32 +01003506 struct dmar_domain *dmar_domain = domain->priv;
3507
3508 domain->priv = NULL;
3509 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003510}
Kay, Allen M38717942008-09-09 18:37:29 +03003511
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003512static int intel_iommu_attach_device(struct iommu_domain *domain,
3513 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003514{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003515 struct dmar_domain *dmar_domain = domain->priv;
3516 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003517 struct intel_iommu *iommu;
3518 int addr_width;
3519 u64 end;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003520 int ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003521
	/* normally pdev has no context mapping yet */
3523 if (unlikely(domain_context_mapped(pdev))) {
3524 struct dmar_domain *old_domain;
3525
3526 old_domain = find_domain(pdev);
3527 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003528 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3529 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3530 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003531 else
3532 domain_remove_dev_info(old_domain);
3533 }
3534 }
3535
David Woodhouse276dbf92009-04-04 01:45:37 +01003536 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3537 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003538 if (!iommu)
3539 return -ENODEV;
3540
3541 /* check if this iommu agaw is sufficient for max mapped address */
3542 addr_width = agaw_to_width(iommu->agaw);
3543 end = DOMAIN_MAX_ADDR(addr_width);
3544 end = end & VTD_PAGE_MASK;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003545 if (end < dmar_domain->max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003546 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3547 "sufficient for the mapped address (%llx)\n",
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003548 __func__, iommu->agaw, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003549 return -EFAULT;
3550 }
3551
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003552 ret = domain_add_dev_info(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003553 if (ret)
3554 return ret;
3555
Yu Zhao93a23a72009-05-18 13:51:37 +08003556 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003557 return ret;
3558}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003559
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003560static void intel_iommu_detach_device(struct iommu_domain *domain,
3561 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003562{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003563 struct dmar_domain *dmar_domain = domain->priv;
3564 struct pci_dev *pdev = to_pci_dev(dev);
3565
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003566 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003567}
Kay, Allen M38717942008-09-09 18:37:29 +03003568
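/*
 * Map [iova, iova + size) to hpa for an IOMMU-API domain, growing the
 * domain's recorded max_addr as long as the minimum AGAW of the
 * attached IOMMUs can still cover it.
 */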
Joerg Roedeldde57a22008-12-03 15:04:09 +01003569static int intel_iommu_map_range(struct iommu_domain *domain,
3570 unsigned long iova, phys_addr_t hpa,
3571 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003572{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003573 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003574 u64 max_addr;
3575 int addr_width;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003576 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003577 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003578
Joerg Roedeldde57a22008-12-03 15:04:09 +01003579 if (iommu_prot & IOMMU_READ)
3580 prot |= DMA_PTE_READ;
3581 if (iommu_prot & IOMMU_WRITE)
3582 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08003583 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3584 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003585
David Woodhouse163cc522009-06-28 00:51:17 +01003586 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003587 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003588 int min_agaw;
3589 u64 end;
3590
3591 /* check if minimum agaw is sufficient for mapped address */
Joerg Roedeldde57a22008-12-03 15:04:09 +01003592 min_agaw = vm_domain_min_agaw(dmar_domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003593 addr_width = agaw_to_width(min_agaw);
3594 end = DOMAIN_MAX_ADDR(addr_width);
3595 end = end & VTD_PAGE_MASK;
3596 if (end < max_addr) {
3597 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3598 "sufficient for the mapped address (%llx)\n",
3599 __func__, min_agaw, max_addr);
3600 return -EFAULT;
3601 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003602 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003603 }
David Woodhousead051222009-06-28 14:22:28 +01003604 /* Round up size to next multiple of PAGE_SIZE, if it and
3605 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003606 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003607 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3608 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003609 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003610}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003611
Joerg Roedeldde57a22008-12-03 15:04:09 +01003612static void intel_iommu_unmap_range(struct iommu_domain *domain,
3613 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003614{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003615 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003616
Sheng Yang4b99d352009-07-08 11:52:52 +01003617 if (!size)
3618 return;
3619
David Woodhouse163cc522009-06-28 00:51:17 +01003620 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3621 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003622
David Woodhouse163cc522009-06-28 00:51:17 +01003623 if (dmar_domain->max_addr == iova + size)
3624 dmar_domain->max_addr = iova;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003625}
Kay, Allen M38717942008-09-09 18:37:29 +03003626
Joerg Roedeld14d6572008-12-03 15:06:57 +01003627static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3628 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003629{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003630 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003631 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003632 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003633
David Woodhouseb026fd22009-06-28 10:37:25 +01003634 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
Kay, Allen M38717942008-09-09 18:37:29 +03003635 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003636 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003637
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003638 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003639}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003640
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003641static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3642 unsigned long cap)
3643{
3644 struct dmar_domain *dmar_domain = domain->priv;
3645
3646 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3647 return dmar_domain->iommu_snooping;
3648
3649 return 0;
3650}
3651
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003652static struct iommu_ops intel_iommu_ops = {
3653 .domain_init = intel_iommu_domain_init,
3654 .domain_destroy = intel_iommu_domain_destroy,
3655 .attach_dev = intel_iommu_attach_device,
3656 .detach_dev = intel_iommu_detach_device,
3657 .map = intel_iommu_map_range,
3658 .unmap = intel_iommu_unmap_range,
3659 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003660 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003661};
David Woodhouse9af88142009-02-13 23:18:03 +00003662
3663static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3664{
3665 /*
3666 * Mobile 4 Series Chipset neglects to set RWBF capability,
3667 * but needs it:
3668 */
3669 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3670 rwbf_quirk = 1;
3671}
3672
3673DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);